Posted to issues@flink.apache.org by GitBox <gi...@apache.org> on 2018/11/09 12:07:14 UTC

[GitHub] ambition119 closed pull request #7071: Complex Event Processing (CEP) SQL Realization

ambition119 closed pull request #7071: Complex Event Processing (CEP) SQL Realization
URL: https://github.com/apache/flink/pull/7071
 
 
   

As this is a foreign pull request (from a fork), the diff is supplied
below for the sake of provenance (as it won't show otherwise due to
GitHub magic):

diff --git a/.gitignore b/.gitignore
index 8fc9fce6fb2..20749c24242 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,7 @@ tmp
 *.log
 .DS_Store
 build-target
+flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/avro/
 flink-formats/flink-avro/src/test/java/org/apache/flink/formats/avro/generated/
 flink-runtime-web/web-dashboard/assets/fonts/
 flink-runtime-web/web-dashboard/node_modules/
@@ -34,3 +35,4 @@ out/
 *.iws
 tools/flink
 tools/flink-*
+tools/releasing/release
diff --git a/.travis.yml b/.travis.yml
index cad9c875f77..2bee7e217bd 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -90,15 +90,15 @@ matrix:
 git:
   depth: 100
 
-
 env:
-    global:
-        # Global variable to avoid hanging travis builds when downloading cache archives.
-        - MALLOC_ARENA_MAX=2
-        # Build artifacts like logs (variables for apache/flink repo)
-        - secure: "gL3QRn6/XyVK+Em9RmVqpM6nbTwlhjK4/JiRYZGGCkBgTq4ZnG+Eq2qKAO22TAsqRSi7g7WAoAhUulPt0SJqH7hjMe0LetbO0izbVXDefwf2PJlsNgBbuFG6604++VUaUEyfPYYw9ADjV59LWG7+B/fjbRsevqRBZ30b1gv/tQQ="
-        - secure: "eM9r8IglvnUKctxz/ga6hwGnCpdOvGyYdGj0H/UiNDEx3Lq1A6yp3gChEIXGJqRUXDI5TaIuidunUGY7KHml8urm8eG2Yk2ttxXehZqLpEaOU2jdNJCdLX8tlVfh14T9bxG5AYHQEV3qJUqDFtfXD3whvzuinrm1oEIA3qUxiA8="
-        - secure: "EQYDWgJM5ANJ/sAFwmSEwSTOe9CDN/ENyQAr5/ntM67XanhTZj2Amgt9LthCRUU4EEPl/OFUTwNHMpv/+wa3q7dwVFldSIg5wyCndzJSATPyPBVjYgsXIQZVIjsq4TwTyrTteT55V6Oz2+l27Fvung2FPuN83ovswsJePFzMBxI="
+  global:
+    # Global variable to avoid hanging travis builds when downloading cache archives.
+    - MALLOC_ARENA_MAX=2
+    # Build artifacts like logs (variables for apache/flink repo)
+    - secure: "gL3QRn6/XyVK+Em9RmVqpM6nbTwlhjK4/JiRYZGGCkBgTq4ZnG+Eq2qKAO22TAsqRSi7g7WAoAhUulPt0SJqH7hjMe0LetbO0izbVXDefwf2PJlsNgBbuFG6604++VUaUEyfPYYw9ADjV59LWG7+B/fjbRsevqRBZ30b1gv/tQQ="
+    - secure: "eM9r8IglvnUKctxz/ga6hwGnCpdOvGyYdGj0H/UiNDEx3Lq1A6yp3gChEIXGJqRUXDI5TaIuidunUGY7KHml8urm8eG2Yk2ttxXehZqLpEaOU2jdNJCdLX8tlVfh14T9bxG5AYHQEV3qJUqDFtfXD3whvzuinrm1oEIA3qUxiA8="
+    - secure: "EQYDWgJM5ANJ/sAFwmSEwSTOe9CDN/ENyQAr5/ntM67XanhTZj2Amgt9LthCRUU4EEPl/OFUTwNHMpv/+wa3q7dwVFldSIg5wyCndzJSATPyPBVjYgsXIQZVIjsq4TwTyrTteT55V6Oz2+l27Fvung2FPuN83ovswsJePFzMBxI="
+    - DOCKER_COMPOSE_VERSION=1.22.0
 
 before_script:
    - "gem install --no-document --version 0.8.9 faraday "
@@ -113,6 +113,11 @@ before_install:
    - "export MAVEN_OPTS=\"-Dorg.slf4j.simpleLogger.showDateTime=true -Dorg.slf4j.simpleLogger.dateTimeFormat=HH:mm:ss.SSS\""
 # just in case: clean up the .m2 home and remove invalid jar files
    - 'test ! -d $HOME/.m2/repository/ || find $HOME/.m2/repository/ -name "*.jar" -exec sh -c ''if ! zip -T {} >/dev/null ; then echo "deleting invalid file: {}"; rm -f {} ; fi'' \;'
+# Installing the specified docker compose version
+   - sudo rm /usr/local/bin/docker-compose
+   - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
+   - chmod +x docker-compose
+   - sudo mv docker-compose /usr/local/bin
 
 # We run mvn and monitor its output. If there is no output for the specified number of seconds, we
 # print the stack traces of all running Java processes.
diff --git a/docs/_config.yml b/docs/_config.yml
index d8ff020bb68..e29ba8fc9fe 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -27,12 +27,14 @@
 # we change the version for the complete docs when forking of a release branch
 # etc.
 # The full version string as referenced in Maven (e.g. 1.2.1)
-version: "1.6-SNAPSHOT"
+version: "1.6.1"
 # For stable releases, leave the bugfix version out (e.g. 1.2). For snapshot
 # release this should be the same as the regular version
-version_title: "1.6-SNAPSHOT"
-version_javadocs: "1.6-SNAPSHOT"
-version_scaladocs: "1.6-SNAPSHOT"
+version_title: "1.6"
+version_javadocs: "1.6"
+version_scaladocs: "1.6"
+# Branch on Github for this version
+github_branch: "release-1.6"
 
 # This suffix is appended to the Scala-dependent Maven artifact names
 scala_version_suffix: "_2.11"
@@ -45,10 +47,13 @@ github_url: "https://github.com/apache/flink"
 download_url: "http://flink.apache.org/downloads.html"
 
 # please use a protocol relative URL here
-baseurl: //ci.apache.org/projects/flink/flink-docs-master
+baseurl: //ci.apache.org/projects/flink/flink-docs-release-1.6
+stable_baseurl: //ci.apache.org/projects/flink/flink-docs-stable
+
+javadocs_baseurl: //ci.apache.org/projects/flink/flink-docs-release-1.6
 
 # Flag whether this is a stable version or not. Used for the quickstart page.
-is_stable: false
+is_stable: true
 
 # Flag to indicate whether an outdated warning should be shown.
 show_outdated_warning: false
diff --git a/docs/_includes/generated/akka_configuration.html b/docs/_includes/generated/akka_configuration.html
index afaba6f0fa5..f5a2a5d77f9 100644
--- a/docs/_includes/generated/akka_configuration.html
+++ b/docs/_includes/generated/akka_configuration.html
@@ -12,11 +12,41 @@
             <td style="word-wrap: break-word;">"10 s"</td>
             <td>Timeout used for all futures and blocking Akka calls. If Flink fails due to timeouts then you should try to increase this value. Timeouts can be caused by slow machines or a congested network. The timeout value requires a time-unit specifier (ms/s/min/h/d).</td>
         </tr>
+        <tr>
+            <td><h5>akka.client-socket-worker-pool.pool-size-factor</h5></td>
+            <td style="word-wrap: break-word;">1.0</td>
+            <td>The pool size factor is used to determine thread pool size using the following formula: ceil(available processors * factor). Resulting size is then bounded by the pool-size-min and pool-size-max values.</td>
+        </tr>
+        <tr>
+            <td><h5>akka.client-socket-worker-pool.pool-size-max</h5></td>
+            <td style="word-wrap: break-word;">2</td>
+            <td>Max number of threads to cap factor-based number to.</td>
+        </tr>
+        <tr>
+            <td><h5>akka.client-socket-worker-pool.pool-size-min</h5></td>
+            <td style="word-wrap: break-word;">1</td>
+            <td>Min number of threads to cap factor-based number to.</td>
+        </tr>
         <tr>
             <td><h5>akka.client.timeout</h5></td>
             <td style="word-wrap: break-word;">"60 s"</td>
             <td>Timeout for all blocking calls on the client side.</td>
         </tr>
+        <tr>
+            <td><h5>akka.fork-join-executor.parallelism-factor</h5></td>
+            <td style="word-wrap: break-word;">2.0</td>
+            <td>The parallelism factor is used to determine thread pool size using the following formula: ceil(available processors * factor). Resulting size is then bounded by the parallelism-min and parallelism-max values.</td>
+        </tr>
+        <tr>
+            <td><h5>akka.fork-join-executor.parallelism-max</h5></td>
+            <td style="word-wrap: break-word;">64</td>
+            <td>Max number of threads to cap factor-based parallelism number to.</td>
+        </tr>
+        <tr>
+            <td><h5>akka.fork-join-executor.parallelism-min</h5></td>
+            <td style="word-wrap: break-word;">8</td>
+            <td>Min number of threads to cap factor-based parallelism number to.</td>
+        </tr>
         <tr>
             <td><h5>akka.framesize</h5></td>
             <td style="word-wrap: break-word;">"10485760b"</td>
@@ -42,6 +72,21 @@
             <td style="word-wrap: break-word;">50</td>
             <td>Milliseconds a gate should be closed for after a remote connection was disconnected.</td>
         </tr>
+        <tr>
+            <td><h5>akka.server-socket-worker-pool.pool-size-factor</h5></td>
+            <td style="word-wrap: break-word;">1.0</td>
+            <td>The pool size factor is used to determine thread pool size using the following formula: ceil(available processors * factor). Resulting size is then bounded by the pool-size-min and pool-size-max values.</td>
+        </tr>
+        <tr>
+            <td><h5>akka.server-socket-worker-pool.pool-size-max</h5></td>
+            <td style="word-wrap: break-word;">2</td>
+            <td>Max number of threads to cap factor-based number to.</td>
+        </tr>
+        <tr>
+            <td><h5>akka.server-socket-worker-pool.pool-size-min</h5></td>
+            <td style="word-wrap: break-word;">1</td>
+            <td>Min number of threads to cap factor-based number to.</td>
+        </tr>
         <tr>
             <td><h5>akka.ssl.enabled</h5></td>
             <td style="word-wrap: break-word;">true</td>
@@ -80,17 +125,17 @@
         <tr>
             <td><h5>akka.watch.heartbeat.interval</h5></td>
             <td style="word-wrap: break-word;">"10 s"</td>
-            <td>Heartbeat interval for Akka’s DeathWatch mechanism to detect dead TaskManagers. If TaskManagers are wrongly marked dead because of lost or delayed heartbeat messages, then you should decrease this value or increase akka.watch.heartbeat.pause. A thorough description of Akka’s DeathWatch can be found &#60;a href="http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector"&#62;here&#60;/a&#62;.</td>
+            <td>Heartbeat interval for Akka’s DeathWatch mechanism to detect dead TaskManagers. If TaskManagers are wrongly marked dead because of lost or delayed heartbeat messages, then you should decrease this value or increase akka.watch.heartbeat.pause. A thorough description of Akka’s DeathWatch can be found <a href="http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector">here</a></td>
         </tr>
         <tr>
             <td><h5>akka.watch.heartbeat.pause</h5></td>
             <td style="word-wrap: break-word;">"60 s"</td>
-            <td>Acceptable heartbeat pause for Akka’s DeathWatch mechanism. A low value does not allow an irregular heartbeat. If TaskManagers are wrongly marked dead because of lost or delayed heartbeat messages, then you should increase this value or decrease akka.watch.heartbeat.interval. Higher value increases the time to detect a dead TaskManager. A thorough description of Akka’s DeathWatch can be found &#60;a href="http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector"&#62;here&#60;/a&#62;.</td>
+            <td>Acceptable heartbeat pause for Akka’s DeathWatch mechanism. A low value does not allow an irregular heartbeat. If TaskManagers are wrongly marked dead because of lost or delayed heartbeat messages, then you should increase this value or decrease akka.watch.heartbeat.interval. Higher value increases the time to detect a dead TaskManager. A thorough description of Akka’s DeathWatch can be found <a href="http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector">here</a></td>
         </tr>
         <tr>
             <td><h5>akka.watch.threshold</h5></td>
             <td style="word-wrap: break-word;">12</td>
-            <td>Threshold for the DeathWatch failure detector. A low value is prone to false positives whereas a high value increases the time to detect a dead TaskManager. A thorough description of Akka’s DeathWatch can be found &#60;a href="http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector"&#62;here&#60;/a&#62;.</td>
+            <td>Threshold for the DeathWatch failure detector. A low value is prone to false positives whereas a high value increases the time to detect a dead TaskManager. A thorough description of Akka’s DeathWatch can be found <a href="http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector">here</a></td>
         </tr>
     </tbody>
 </table>
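
For example, on a machine with 16 available processors, the defaults above give the fork-join executor ceil(16 * 2.0) = 32 threads, which already falls inside the [8, 64] bounds and is used as-is, while the client socket worker pool would compute ceil(16 * 1.0) = 16 threads and then be capped at its pool-size-max of 2.
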
diff --git a/docs/_includes/generated/core_configuration.html b/docs/_includes/generated/core_configuration.html
index 91fa1a56e06..98cca9125a0 100644
--- a/docs/_includes/generated/core_configuration.html
+++ b/docs/_includes/generated/core_configuration.html
@@ -24,7 +24,7 @@
         </tr>
         <tr>
             <td><h5>io.tmp.dirs</h5></td>
-            <td style="word-wrap: break-word;">System.getProperty("java.io.tmpdir")</td>
+            <td style="word-wrap: break-word;">'LOCAL_DIRS' on Yarn. '_FLINK_TMP_DIR' on Mesos. System.getProperty("java.io.tmpdir") in standalone.</td>
             <td></td>
         </tr>
         <tr>
diff --git a/docs/_includes/generated/environment_configuration.html b/docs/_includes/generated/environment_configuration.html
index a56a8c24520..7db585d2cec 100644
--- a/docs/_includes/generated/environment_configuration.html
+++ b/docs/_includes/generated/environment_configuration.html
@@ -7,6 +7,11 @@
         </tr>
     </thead>
     <tbody>
+        <tr>
+            <td><h5>env.hadoop.conf.dir</h5></td>
+            <td style="word-wrap: break-word;">(none)</td>
+            <td>Path to the Hadoop configuration directory. It is required to read HDFS and/or YARN configuration. You can also set it via an environment variable.</td>
+        </tr>
         <tr>
             <td><h5>env.java.opts</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
@@ -37,5 +42,10 @@
             <td style="word-wrap: break-word;">(none)</td>
             <td>Additional command line options passed to SSH clients when starting or stopping JobManager, TaskManager, and Zookeeper services (start-cluster.sh, stop-cluster.sh, start-zookeeper-quorum.sh, stop-zookeeper-quorum.sh).</td>
         </tr>
+        <tr>
+            <td><h5>env.yarn.conf.dir</h5></td>
+            <td style="word-wrap: break-word;">(none)</td>
+            <td>Path to the YARN configuration directory. It is required to run Flink on YARN. You can also set it via an environment variable.</td>
+        </tr>
     </tbody>
 </table>
diff --git a/docs/_includes/generated/high_availability_zookeeper_configuration.html b/docs/_includes/generated/high_availability_zookeeper_configuration.html
index a49d160618a..6577878674b 100644
--- a/docs/_includes/generated/high_availability_zookeeper_configuration.html
+++ b/docs/_includes/generated/high_availability_zookeeper_configuration.html
@@ -60,7 +60,7 @@
         <tr>
             <td><h5>high-availability.zookeeper.path.mesos-workers</h5></td>
             <td style="word-wrap: break-word;">"/mesos-workers"</td>
-            <td>ZooKeeper root path (ZNode) for Mesos workers.</td>
+            <td>The ZooKeeper root path for persisting the Mesos worker information.</td>
         </tr>
         <tr>
             <td><h5>high-availability.zookeeper.path.root</h5></td>
diff --git a/docs/_includes/generated/job_manager_configuration.html b/docs/_includes/generated/job_manager_configuration.html
index 0353874c9b6..0458af24c06 100644
--- a/docs/_includes/generated/job_manager_configuration.html
+++ b/docs/_includes/generated/job_manager_configuration.html
@@ -20,7 +20,7 @@
         <tr>
             <td><h5>jobmanager.execution.failover-strategy</h5></td>
             <td style="word-wrap: break-word;">"full"</td>
-            <td>The maximum number of prior execution attempts kept in history.</td>
+            <td>This option specifies how the job computation recovers from task failures. Accepted values are:<ul><li>'full': Restarts all tasks.</li><li>'individual': Restarts only the failed task. Should only be used if all tasks are independent components.</li><li>'region': Restarts all tasks that could be affected by the task failure.</li></ul></td>
         </tr>
         <tr>
             <td><h5>jobmanager.heap.size</h5></td>
diff --git a/docs/_includes/generated/mesos_configuration.html b/docs/_includes/generated/mesos_configuration.html
index cd0ae2432e3..54e92e5680c 100644
--- a/docs/_includes/generated/mesos_configuration.html
+++ b/docs/_includes/generated/mesos_configuration.html
@@ -9,23 +9,23 @@
     <tbody>
         <tr>
             <td><h5>mesos.failover-timeout</h5></td>
-            <td style="word-wrap: break-word;">600</td>
+            <td style="word-wrap: break-word;">604800</td>
             <td>The failover timeout in seconds for the Mesos scheduler, after which running tasks are automatically shut down.</td>
         </tr>
         <tr>
             <td><h5>mesos.initial-tasks</h5></td>
             <td style="word-wrap: break-word;">0</td>
-            <td>The initial workers to bring up when the master starts</td>
+            <td>The initial workers to bring up when the master starts. This option is ignored unless Flink is in <a href="#legacy">legacy mode</a>.</td>
         </tr>
         <tr>
             <td><h5>mesos.master</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td>The Mesos master URL. The value should be in one of the following forms: "host:port", "zk://host1:port1,host2:port2,.../path", "zk://username:password@host1:port1,host2:port2,.../path" or "file:///path/to/file"</td>
+            <td>The Mesos master URL. The value should be in one of the following forms: <ul><li>host:port</li><li>zk://host1:port1,host2:port2,.../path</li><li>zk://username:password@host1:port1,host2:port2,.../path</li><li>file:///path/to/file</li></ul></td>
         </tr>
         <tr>
             <td><h5>mesos.maximum-failed-tasks</h5></td>
             <td style="word-wrap: break-word;">-1</td>
-            <td>The maximum number of failed workers before the cluster fails. May be set to -1 to disable this feature</td>
+            <td>The maximum number of failed workers before the cluster fails. May be set to -1 to disable this feature. This option is ignored unless Flink is in <a href="#legacy">legacy mode</a>.</td>
         </tr>
         <tr>
             <td><h5>mesos.resourcemanager.artifactserver.port</h5></td>
@@ -65,7 +65,7 @@
         <tr>
             <td><h5>mesos.resourcemanager.tasks.port-assignments</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td>Comma-separated list of configuration keys which represent a configurable port.All port keys will dynamically get a port assigned through Mesos.</td>
+            <td>Comma-separated list of configuration keys which represent a configurable port. All port keys will dynamically get a port assigned through Mesos.</td>
         </tr>
     </tbody>
 </table>
diff --git a/docs/_includes/generated/mesos_task_manager_configuration.html b/docs/_includes/generated/mesos_task_manager_configuration.html
index 0af844dbceb..1e67f8429d7 100644
--- a/docs/_includes/generated/mesos_task_manager_configuration.html
+++ b/docs/_includes/generated/mesos_task_manager_configuration.html
@@ -10,12 +10,12 @@
         <tr>
             <td><h5>mesos.constraints.hard.hostattribute</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td>Constraints for task placement on mesos.</td>
+            <td>Constraints for task placement on Mesos based on agent attributes. Takes a comma-separated list of key:value pairs corresponding to the attributes exposed by the target mesos agents. Example: az:eu-west-1a,series:t2</td>
         </tr>
         <tr>
             <td><h5>mesos.resourcemanager.tasks.bootstrap-cmd</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td></td>
+            <td>A command which is executed before the TaskManager is started.</td>
         </tr>
         <tr>
             <td><h5>mesos.resourcemanager.tasks.container.docker.force-pull-image</h5></td>
@@ -50,12 +50,12 @@
         <tr>
             <td><h5>mesos.resourcemanager.tasks.gpus</h5></td>
             <td style="word-wrap: break-word;">0</td>
-            <td></td>
+            <td>GPUs to assign to the Mesos workers.</td>
         </tr>
         <tr>
             <td><h5>mesos.resourcemanager.tasks.hostname</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td></td>
+            <td>Optional value to define the TaskManager’s hostname. The pattern _TASK_ is replaced by the actual id of the Mesos task. This can be used to configure the TaskManager to use Mesos DNS (e.g. _TASK_.flink-service.mesos) for name lookups.</td>
         </tr>
         <tr>
             <td><h5>mesos.resourcemanager.tasks.mem</h5></td>
diff --git a/docs/_includes/generated/metric_configuration.html b/docs/_includes/generated/metric_configuration.html
index 868a9bd66e9..0fe9d0c36f8 100644
--- a/docs/_includes/generated/metric_configuration.html
+++ b/docs/_includes/generated/metric_configuration.html
@@ -7,25 +7,35 @@
         </tr>
     </thead>
     <tbody>
+        <tr>
+            <td><h5>metrics.latency.granularity</h5></td>
+            <td style="word-wrap: break-word;">"subtask"</td>
+            <td>Defines the granularity of latency metrics. Accepted values are:<ul><li>single - Track latency without differentiating between sources and subtasks.</li><li>operator - Track latency while differentiating between sources, but not subtasks.</li><li>subtask - Track latency while differentiating between sources and subtasks.</li></ul></td>
+        </tr>
         <tr>
             <td><h5>metrics.latency.history-size</h5></td>
             <td style="word-wrap: break-word;">128</td>
             <td>Defines the number of measured latencies to maintain at each operator.</td>
         </tr>
         <tr>
-            <td><h5>metrics.reporter.&#60;name&#62;.&#60;parameter&#62;</h5></td>
+            <td><h5>metrics.latency.interval</h5></td>
+            <td style="word-wrap: break-word;">2000</td>
+            <td>Defines the interval at which latency tracking marks are emitted from the sources. Disables latency tracking if set to 0 or a negative value. Enabling this feature can significantly impact the performance of the cluster.</td>
+        </tr>
+        <tr>
+            <td><h5>metrics.reporter.&lt;name&gt;.&lt;parameter&gt;</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td>Configures the parameter &#60;parameter&#62; for the reporter named &#60;name&#62;.</td>
+            <td>Configures the parameter &lt;parameter&gt; for the reporter named &lt;name&gt;.</td>
         </tr>
         <tr>
-            <td><h5>metrics.reporter.&#60;name&#62;.class</h5></td>
+            <td><h5>metrics.reporter.&lt;name&gt;.class</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td>The reporter class to use for the reporter named &#60;name&#62;.</td>
+            <td>The reporter class to use for the reporter named &lt;name&gt;.</td>
         </tr>
         <tr>
-            <td><h5>metrics.reporter.&#60;name&#62;.interval</h5></td>
+            <td><h5>metrics.reporter.&lt;name&gt;.interval</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
-            <td>The reporter interval to use for the reporter named &#60;name&#62;.</td>
+            <td>The reporter interval to use for the reporter named &lt;name&gt;.</td>
         </tr>
         <tr>
             <td><h5>metrics.reporters</h5></td>
@@ -39,32 +49,32 @@
         </tr>
         <tr>
             <td><h5>metrics.scope.jm</h5></td>
-            <td style="word-wrap: break-word;">"&#60;host&#62;.jobmanager"</td>
+            <td style="word-wrap: break-word;">"&lt;host&gt;.jobmanager"</td>
             <td>Defines the scope format string that is applied to all metrics scoped to a JobManager.</td>
         </tr>
         <tr>
             <td><h5>metrics.scope.jm.job</h5></td>
-            <td style="word-wrap: break-word;">"&#60;host&#62;.jobmanager.&#60;job_name&#62;"</td>
+            <td style="word-wrap: break-word;">"&lt;host&gt;.jobmanager.&lt;job_name&gt;"</td>
             <td>Defines the scope format string that is applied to all metrics scoped to a job on a JobManager.</td>
         </tr>
         <tr>
             <td><h5>metrics.scope.operator</h5></td>
-            <td style="word-wrap: break-word;">"&#60;host&#62;.taskmanager.&#60;tm_id&#62;.&#60;job_name&#62;.&#60;operator_name&#62;.&#60;subtask_index&#62;"</td>
+            <td style="word-wrap: break-word;">"&lt;host&gt;.taskmanager.&lt;tm_id&gt;.&lt;job_name&gt;.&lt;operator_name&gt;.&lt;subtask_index&gt;"</td>
             <td>Defines the scope format string that is applied to all metrics scoped to an operator.</td>
         </tr>
         <tr>
             <td><h5>metrics.scope.task</h5></td>
-            <td style="word-wrap: break-word;">"&#60;host&#62;.taskmanager.&#60;tm_id&#62;.&#60;job_name&#62;.&#60;task_name&#62;.&#60;subtask_index&#62;"</td>
+            <td style="word-wrap: break-word;">"&lt;host&gt;.taskmanager.&lt;tm_id&gt;.&lt;job_name&gt;.&lt;task_name&gt;.&lt;subtask_index&gt;"</td>
             <td>Defines the scope format string that is applied to all metrics scoped to a task.</td>
         </tr>
         <tr>
             <td><h5>metrics.scope.tm</h5></td>
-            <td style="word-wrap: break-word;">"&#60;host&#62;.taskmanager.&#60;tm_id&#62;"</td>
+            <td style="word-wrap: break-word;">"&lt;host&gt;.taskmanager.&lt;tm_id&gt;"</td>
             <td>Defines the scope format string that is applied to all metrics scoped to a TaskManager.</td>
         </tr>
         <tr>
             <td><h5>metrics.scope.tm.job</h5></td>
-            <td style="word-wrap: break-word;">"&#60;host&#62;.taskmanager.&#60;tm_id&#62;.&#60;job_name&#62;"</td>
+            <td style="word-wrap: break-word;">"&lt;host&gt;.taskmanager.&lt;tm_id&gt;.&lt;job_name&gt;"</td>
             <td>Defines the scope format string that is applied to all metrics scoped to a job on a TaskManager.</td>
         </tr>
     </tbody>
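
As a side note, the latency-marker interval described above can also be set per job through the `ExecutionConfig`; a minimal sketch (the job topology is omitted, and the 2000 ms value simply mirrors the `metrics.latency.interval` default shown in the table):

{% highlight java %}
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class LatencyTrackingSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Emit latency markers from the sources every 2000 ms,
        // mirroring the metrics.latency.interval option documented above.
        env.getConfig().setLatencyTrackingInterval(2000L);

        // ... define sources, transformations and sinks here, then call env.execute()
    }
}
{% endhighlight %}
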
diff --git a/docs/_includes/generated/resource_manager_configuration.html b/docs/_includes/generated/resource_manager_configuration.html
index 1b82e51b4ef..9243fcd3cb9 100644
--- a/docs/_includes/generated/resource_manager_configuration.html
+++ b/docs/_includes/generated/resource_manager_configuration.html
@@ -32,5 +32,10 @@
             <td style="word-wrap: break-word;">0</td>
             <td>Defines the network port to connect to for communication with the resource manager. By default, the port of the JobManager, because the same ActorSystem is used. It's not possible to use this configuration key to define port ranges.</td>
         </tr>
+        <tr>
+            <td><h5>resourcemanager.taskmanager-timeout</h5></td>
+            <td style="word-wrap: break-word;">30000</td>
+            <td>The timeout for an idle task manager to be released.</td>
+        </tr>
     </tbody>
 </table>
diff --git a/docs/_includes/generated/rest_configuration.html b/docs/_includes/generated/rest_configuration.html
index 1de41654eb7..2c5f539a480 100644
--- a/docs/_includes/generated/rest_configuration.html
+++ b/docs/_includes/generated/rest_configuration.html
@@ -32,6 +32,11 @@
             <td style="word-wrap: break-word;">15000</td>
             <td>The maximum time in ms for the client to establish a TCP connection.</td>
         </tr>
+        <tr>
+            <td><h5>rest.idleness-timeout</h5></td>
+            <td style="word-wrap: break-word;">300000</td>
+            <td>The maximum time in ms for a connection to stay idle before failing.</td>
+        </tr>
         <tr>
             <td><h5>rest.port</h5></td>
             <td style="word-wrap: break-word;">8081</td>
@@ -52,5 +57,15 @@
             <td style="word-wrap: break-word;">104857600</td>
             <td>The maximum content length in bytes that the server will handle.</td>
         </tr>
+        <tr>
+            <td><h5>rest.server.numThreads</h5></td>
+            <td style="word-wrap: break-word;">4</td>
+            <td>The number of threads for the asynchronous processing of requests.</td>
+        </tr>
+        <tr>
+            <td><h5>rest.server.thread-priority</h5></td>
+            <td style="word-wrap: break-word;">5</td>
+            <td>Thread priority of the REST server's executor for processing asynchronous requests. Lowering the thread priority will give Flink's main components more CPU time whereas increasing will allocate more time for the REST server's processing.</td>
+        </tr>
     </tbody>
 </table>
diff --git a/docs/_includes/generated/rest_dispatcher.html b/docs/_includes/generated/rest_dispatcher.html
index 6ed59becf21..cd05922c621 100644
--- a/docs/_includes/generated/rest_dispatcher.html
+++ b/docs/_includes/generated/rest_dispatcher.html
@@ -178,7 +178,7 @@
     </tr>
     <tr>
       <td colspan="2">Uploads a jar to the cluster. The jar must be sent as multi-part data. Make sure that the "Content-Type" header is set to "application/x-java-archive", as some http libraries do not add the header by default.
-Using 'curl' you can upload a jar via 'curl -X POST -H "Expect:" -F "jarfile=#path/to/flink-job.jar" http://hostname:port/jars/upload'.</td>
+Using 'curl' you can upload a jar via 'curl -X POST -H "Expect:" -F "jarfile=@path/to/flink-job.jar" http://hostname:port/jars/upload'.</td>
     </tr>
     <tr>
       <td colspan="2">
@@ -360,11 +360,31 @@
     </tr>
     <tr>
       <td colspan="2">
-        <button data-toggle="collapse" data-target="#1936993190">Request</button>
-        <div id="1936993190" class="collapse">
+        <button data-toggle="collapse" data-target="#315035146">Request</button>
+        <div id="315035146" class="collapse">
           <pre>
             <code>
-{}            </code>
+{
+  "type" : "object",
+  "id" : "urn:jsonschema:org:apache:flink:runtime:webmonitor:handlers:JarRunRequestBody",
+  "properties" : {
+    "entryClass" : {
+      "type" : "string"
+    },
+    "programArgs" : {
+      "type" : "string"
+    },
+    "parallelism" : {
+      "type" : "integer"
+    },
+    "allowNonRestoredState" : {
+      "type" : "boolean"
+    },
+    "savepointPath" : {
+      "type" : "string"
+    }
+  }
+}            </code>
           </pre>
          </div>
       </td>
diff --git a/docs/_includes/generated/rocks_db_configuration.html b/docs/_includes/generated/rocks_db_configuration.html
index 57b95114976..8983f8b41dd 100644
--- a/docs/_includes/generated/rocks_db_configuration.html
+++ b/docs/_includes/generated/rocks_db_configuration.html
@@ -13,9 +13,9 @@
             <td>The local directory (on the TaskManager) where RocksDB puts its files.</td>
         </tr>
         <tr>
-            <td><h5>state.backend.rocksdb.timer-service.impl</h5></td>
+            <td><h5>state.backend.rocksdb.timer-service.factory</h5></td>
             <td style="word-wrap: break-word;">"HEAP"</td>
-            <td>This determines the timer service implementation. Options are either HEAP (heap-based, default) or ROCKSDB for an implementation based on RocksDB.</td>
+            <td>This determines the factory for the timer service state implementation. Options are either HEAP (heap-based, default) or ROCKSDB for an implementation based on RocksDB.</td>
         </tr>
     </tbody>
 </table>
diff --git a/docs/_includes/generated/security_configuration.html b/docs/_includes/generated/security_configuration.html
index 5042cf3df01..8999336926f 100644
--- a/docs/_includes/generated/security_configuration.html
+++ b/docs/_includes/generated/security_configuration.html
@@ -10,13 +10,23 @@
         <tr>
             <td><h5>security.ssl.algorithms</h5></td>
             <td style="word-wrap: break-word;">"TLS_RSA_WITH_AES_128_CBC_SHA"</td>
-            <td>The comma separated list of standard SSL algorithms to be supported. Read more &#60;a href="http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites"&#62;here&#60;/a&#62;.</td>
+            <td>The comma separated list of standard SSL algorithms to be supported. Read more <a href="http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites">here</a></td>
+        </tr>
+        <tr>
+            <td><h5>security.ssl.internal.close-notify-flush-timeout</h5></td>
+            <td style="word-wrap: break-word;">-1</td>
+            <td>The timeout (in ms) for flushing the `close_notify` that was triggered by closing a channel. If the `close_notify` was not flushed in the given timeout the channel will be closed forcibly. (-1 = use system default)</td>
         </tr>
         <tr>
             <td><h5>security.ssl.internal.enabled</h5></td>
             <td style="word-wrap: break-word;">false</td>
             <td>Turns on SSL for internal network communication. Optionally, specific components may override this through their own settings (rpc, data transport, REST, etc).</td>
         </tr>
+        <tr>
+            <td><h5>security.ssl.internal.handshake-timeout</h5></td>
+            <td style="word-wrap: break-word;">-1</td>
+            <td>The timeout (in ms) during SSL handshake. (-1 = use system default)</td>
+        </tr>
         <tr>
             <td><h5>security.ssl.internal.key-password</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
@@ -32,6 +42,16 @@
             <td style="word-wrap: break-word;">(none)</td>
             <td>The secret to decrypt the keystore file for Flink's internal endpoints (rpc, data transport, blob server).</td>
         </tr>
+        <tr>
+            <td><h5>security.ssl.internal.session-cache-size</h5></td>
+            <td style="word-wrap: break-word;">-1</td>
+            <td>The size of the cache used for storing SSL session objects. According to https://github.com/netty/netty/issues/832, you should always set this to an appropriate number to not run into a bug with stalling IO threads during garbage collection. (-1 = use system default).</td>
+        </tr>
+        <tr>
+            <td><h5>security.ssl.internal.session-timeout</h5></td>
+            <td style="word-wrap: break-word;">-1</td>
+            <td>The timeout (in ms) for the cached SSL session objects. (-1 = use system default)</td>
+        </tr>
         <tr>
             <td><h5>security.ssl.internal.truststore</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
@@ -62,6 +82,11 @@
             <td style="word-wrap: break-word;">"TLSv1.2"</td>
             <td>The SSL protocol version to be supported for the ssl transport. Note that it doesn’t support comma separated list.</td>
         </tr>
+        <tr>
+            <td><h5>security.ssl.rest.authentication-enabled</h5></td>
+            <td style="word-wrap: break-word;">false</td>
+            <td>Turns on mutual SSL authentication for external communication via the REST endpoints.</td>
+        </tr>
         <tr>
             <td><h5>security.ssl.rest.enabled</h5></td>
             <td style="word-wrap: break-word;">false</td>
diff --git a/docs/_includes/generated/slot_manager_configuration.html b/docs/_includes/generated/slot_manager_configuration.html
deleted file mode 100644
index 1517a395f27..00000000000
--- a/docs/_includes/generated/slot_manager_configuration.html
+++ /dev/null
@@ -1,21 +0,0 @@
-<table class="table table-bordered">
-    <thead>
-        <tr>
-            <th class="text-left" style="width: 20%">Key</th>
-            <th class="text-left" style="width: 15%">Default</th>
-            <th class="text-left" style="width: 65%">Description</th>
-        </tr>
-    </thead>
-    <tbody>
-        <tr>
-            <td><h5>slotmanager.request-timeout</h5></td>
-            <td style="word-wrap: break-word;">600000</td>
-            <td>The timeout for a slot request to be discarded.</td>
-        </tr>
-        <tr>
-            <td><h5>slotmanager.taskmanager-timeout</h5></td>
-            <td style="word-wrap: break-word;">30000</td>
-            <td>The timeout for an idle task manager to be released.</td>
-        </tr>
-    </tbody>
-</table>
diff --git a/docs/_includes/generated/yarn_config_configuration.html b/docs/_includes/generated/yarn_config_configuration.html
index fb16e5340c8..bbe25499f17 100644
--- a/docs/_includes/generated/yarn_config_configuration.html
+++ b/docs/_includes/generated/yarn_config_configuration.html
@@ -30,7 +30,7 @@
         <tr>
             <td><h5>yarn.containers.vcores</h5></td>
             <td style="word-wrap: break-word;">-1</td>
-            <td>The number of virtual cores (vcores) per YARN container. By default, the number of vcores is set to the number of slots per TaskManager, if set, or to 1, otherwise.</td>
+            <td>The number of virtual cores (vcores) per YARN container. By default, the number of vcores is set to the number of slots per TaskManager, if set, or to 1, otherwise. In order for this parameter to be used, your cluster must have CPU scheduling enabled. You can do this by using the <span markdown="span">`org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler`</span>.</td>
         </tr>
         <tr>
             <td><h5>yarn.heartbeat-delay</h5></td>
diff --git a/docs/_includes/sidenav.html b/docs/_includes/sidenav.html
index 5e50c350d6b..5c5fb51d126 100644
--- a/docs/_includes/sidenav.html
+++ b/docs/_includes/sidenav.html
@@ -126,8 +126,8 @@
   {% endif %}
 {% endfor %}
   <li class="divider"></li>
-  <li><a href="https://ci.apache.org/projects/flink/flink-docs-release-{{site.version_javadocs}}/api/java"><i class="fa fa-external-link title" aria-hidden="true"></i> Javadocs</a></li>
-  <li><a href="https://ci.apache.org/projects/flink/flink-docs-release-{{site.version_scaladocs}}/api/scala/index.html#org.apache.flink.api.scala.package"><i class="fa fa-external-link title" aria-hidden="true"></i> Scaladocs</a></li>
+  <li><a href="{{ site.javadocs_baseurl }}/api/java"><i class="fa fa-external-link title" aria-hidden="true"></i> Javadocs</a></li>
+  <li><a href="{{ site.javadocs_baseurl }}/api/scala/index.html#org.apache.flink.api.scala.package"><i class="fa fa-external-link title" aria-hidden="true"></i> Scaladocs</a></li>
   <li><a href="http://flink.apache.org"><i class="fa fa-external-link title" aria-hidden="true"></i> Project Page</a></li>
 </ul>
 
diff --git a/docs/_layouts/base.html b/docs/_layouts/base.html
index 7d86d74f12b..1dbaa76696a 100644
--- a/docs/_layouts/base.html
+++ b/docs/_layouts/base.html
@@ -26,6 +26,7 @@
     <title>Apache Flink {{ site.version_title }} Documentation: {{ page.title }}</title>
     <link rel="shortcut icon" href="{{ site.baseurl }}/page/favicon.ico" type="image/x-icon">
     <link rel="icon" href="{{ site.baseurl }}/page/favicon.ico" type="image/x-icon">
+    <link rel="canonical" href="{{ site.stable_baseurl }}{{ page.url | replace:'index.html',''}}">
 
     <!-- Bootstrap -->
     <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css">
diff --git a/docs/dev/api_concepts.md b/docs/dev/api_concepts.md
index c4215074683..d1e61003951 100644
--- a/docs/dev/api_concepts.md
+++ b/docs/dev/api_concepts.md
@@ -510,7 +510,7 @@ data.map(new MapFunction<String, Integer> () {
 
 #### Java 8 Lambdas
 
-Flink also supports Java 8 Lambdas in the Java API. Please see the full [Java 8 Guide]({{ site.baseurl }}/dev/java8.html).
+Flink also supports Java 8 Lambdas in the Java API. Please see the full [Java 8 Guide]({{ site.baseurl }}/dev/java_lambdas.html).
 
 {% highlight java %}
 data.filter(s -> s.startsWith("http://"));
diff --git a/docs/dev/connectors/elasticsearch.md b/docs/dev/connectors/elasticsearch.md
index 52d1b58bf51..927bb69f1a7 100644
--- a/docs/dev/connectors/elasticsearch.md
+++ b/docs/dev/connectors/elasticsearch.md
@@ -55,6 +55,11 @@ of the Elasticsearch installation:
         <td>1.3.0</td>
         <td>5.x</td>
     </tr>
+    <tr>
+        <td>flink-connector-elasticsearch6{{ site.scala_version_suffix }}</td>
+        <td>1.6.0</td>
+        <td>6 and later versions</td>
+    </tr>
   </tbody>
 </table>
 
@@ -71,7 +76,7 @@ creating an `ElasticsearchSink` for requesting document actions against your clu
 
 ## Elasticsearch Sink
 
-The `ElasticsearchSink` uses a `TransportClient` to communicate with an
+The `ElasticsearchSink` uses a `TransportClient` (before 6.x) or `RestHighLevelClient` (starting with 6.x) to communicate with an
 Elasticsearch cluster.
 
 The example below shows how to configure and create a sink:
@@ -79,6 +84,23 @@ The example below shows how to configure and create a sink:
 <div class="codetabs" markdown="1">
 <div data-lang="java, Elasticsearch 1.x" markdown="1">
 {% highlight java %}
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSink;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
+
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 DataStream<String> input = ...;
 
 Map<String, String> config = new HashMap<>();
@@ -110,6 +132,22 @@ input.addSink(new ElasticsearchSink<>(config, transportAddresses, new Elasticsea
 </div>
 <div data-lang="java, Elasticsearch 2.x / 5.x" markdown="1">
 {% highlight java %}
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
+import org.apache.flink.streaming.connectors.elasticsearch5.ElasticsearchSink;
+
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Requests;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 DataStream<String> input = ...;
 
 Map<String, String> config = new HashMap<>();
@@ -138,8 +176,85 @@ input.addSink(new ElasticsearchSink<>(config, transportAddresses, new Elasticsea
     }
 }));{% endhighlight %}
 </div>
+<div data-lang="java, Elasticsearch 6.x" markdown="1">
+{% highlight java %}
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
+import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Requests;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+DataStream<String> input = ...;
+
+List<HttpHost> httpHosts = new ArrayList<>();
+httpHosts.add(new HttpHost("127.0.0.1", 9200, "http"));
+httpHosts.add(new HttpHost("10.2.3.1", 9200, "http"));
+
+// use a ElasticsearchSink.Builder to create an ElasticsearchSink
+ElasticsearchSink.Builder<String> esSinkBuilder = new ElasticsearchSink.Builder<>(
+    httpHosts,
+    new ElasticsearchSinkFunction<String>() {
+        public IndexRequest createIndexRequest(String element) {
+            Map<String, String> json = new HashMap<>();
+            json.put("data", element);
+        
+            return Requests.indexRequest()
+                    .index("my-index")
+                    .type("my-type")
+                    .source(json);
+        }
+        
+        @Override
+        public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
+            indexer.add(createIndexRequest(element));
+        }
+    }
+);
+
+// configuration for the bulk requests; this instructs the sink to emit after every element, otherwise they would be buffered
+esSinkBuilder.setBulkFlushMaxActions(1);
+
+// provide a RestClientFactory for custom configuration on the internally created REST client
+esSinkBuilder.setRestClientFactory(
+  restClientBuilder -> {
+    restClientBuilder.setDefaultHeaders(...)
+    restClientBuilder.setMaxRetryTimeoutMillis(...)
+    restClientBuilder.setPathPrefix(...)
+    restClientBuilder.setHttpClientConfigCallback(...)
+  }
+);
+
+// finally, build and add the sink to the job's pipeline
+input.addSink(esSinkBuilder.build());
+{% endhighlight %}
+</div>
 <div data-lang="scala, Elasticsearch 1.x" markdown="1">
 {% highlight scala %}
+import org.apache.flink.api.common.functions.RuntimeContext
+import org.apache.flink.streaming.api.datastream.DataStream
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSink
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer
+
+import org.elasticsearch.action.index.IndexRequest
+import org.elasticsearch.client.Requests
+import org.elasticsearch.common.transport.InetSocketTransportAddress
+import org.elasticsearch.common.transport.TransportAddress
+
+import java.net.InetAddress
+import java.util.ArrayList
+import java.util.HashMap
+import java.util.List
+import java.util.Map
+
 val input: DataStream[String] = ...
 
 val config = new java.util.HashMap[String, String]
@@ -166,6 +281,22 @@ input.addSink(new ElasticsearchSink(config, transportAddresses, new Elasticsearc
 </div>
 <div data-lang="scala, Elasticsearch 2.x / 5.x" markdown="1">
 {% highlight scala %}
+import org.apache.flink.api.common.functions.RuntimeContext
+import org.apache.flink.streaming.api.datastream.DataStream
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer
+import org.apache.flink.streaming.connectors.elasticsearch5.ElasticsearchSink
+
+import org.elasticsearch.action.index.IndexRequest
+import org.elasticsearch.client.Requests
+
+import java.net.InetAddress
+import java.net.InetSocketAddress
+import java.util.ArrayList
+import java.util.HashMap
+import java.util.List
+import java.util.Map
+
 val input: DataStream[String] = ...
 
 val config = new java.util.HashMap[String, String]
@@ -190,14 +321,74 @@ input.addSink(new ElasticsearchSink(config, transportAddresses, new Elasticsearc
 }))
 {% endhighlight %}
 </div>
+<div data-lang="scala, Elasticsearch 6.x" markdown="1">
+{% highlight scala %}
+import org.apache.flink.api.common.functions.RuntimeContext
+import org.apache.flink.streaming.api.datastream.DataStream
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer
+import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink
+
+import org.apache.http.HttpHost
+import org.elasticsearch.action.index.IndexRequest
+import org.elasticsearch.client.Requests
+
+import java.util.ArrayList
+import java.util.List
+
+val input: DataStream[String] = ...
+
+val httpHosts = new java.util.ArrayList[HttpHost]
+httpHosts.add(new HttpHost("127.0.0.1", 9300, "http"))
+httpHosts.add(new HttpHost("10.2.3.1", 9300, "http"))
+
+val esSinkBuilder = new ElasticsearchSink.Builder[String](
+  httpHosts,
+  new ElasticsearchSinkFunction[String] {
+    def createIndexRequest(element: String): IndexRequest = {
+      val json = new java.util.HashMap[String, String]
+      json.put("data", element)
+      
+      return Requests.indexRequest()
+              .index("my-index")
+              .type("my-type")
+              .source(json)
+    }
+
+    override def process(element: String, ctx: RuntimeContext, indexer: RequestIndexer): Unit = {
+      indexer.add(createIndexRequest(element))
+    }
+  }
+)
+
+// configuration for the bulk requests; this instructs the sink to emit after every element, otherwise they would be buffered
+esSinkBuilder.setBulkFlushMaxActions(1)
+
+// provide a RestClientFactory for custom configuration on the internally created REST client
+esSinkBuilder.setRestClientFactory(
+  restClientBuilder => {
+    restClientBuilder.setDefaultHeaders(...)
+    restClientBuilder.setMaxRetryTimeoutMillis(...)
+    restClientBuilder.setPathPrefix(...)
+    restClientBuilder.setHttpClientConfigCallback(...)
+  }
+)
+
+// finally, build and add the sink to the job's pipeline
+input.addSink(esSinkBuilder.build)
+{% endhighlight %}
+</div>
 </div>
 
-Note how a `Map` of `String`s is used to configure the `ElasticsearchSink`.
+For Elasticsearch versions that still use the now deprecated `TransportClient` to communicate
+with the Elasticsearch cluster (i.e., versions 5.x and below), note how a `Map` of `String`s
+is used to configure the `ElasticsearchSink`. This config map will be directly
+forwarded when creating the internally used `TransportClient`.
 The configuration keys are documented in the Elasticsearch documentation
 [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html).
 Especially important is the `cluster.name` parameter that must correspond to
 the name of your cluster.
 
+For Elasticsearch 6.x and above, the `RestHighLevelClient` is used internally for cluster communication.
+By default, the connector uses the default configurations for the REST client. To provide custom
+configuration for the REST client, users can pass a `RestClientFactory` implementation when
+setting up the `ElasticsearchSink.Builder` that builds the sink.
+
 Also note that the example only demonstrates performing a single index
 request for each incoming element. Generally, the `ElasticsearchSinkFunction`
 can be used to perform multiple requests of different types (ex.,
diff --git a/docs/dev/connectors/filesystem_sink.md b/docs/dev/connectors/filesystem_sink.md
index af1349d6665..79ed08e9d41 100644
--- a/docs/dev/connectors/filesystem_sink.md
+++ b/docs/dev/connectors/filesystem_sink.md
@@ -70,7 +70,8 @@ stored. The sink can be further configured by specifying a custom bucketer, writ
 
 By default the bucketing sink will split by the current system time when elements arrive and will
 use the datetime pattern `"yyyy-MM-dd--HH"` to name the buckets. This pattern is passed to
-`SimpleDateFormat` with the current system time to form a bucket path. A new bucket will be created
+`DateTimeFormatter` with the current system time and the JVM's default timezone to form a bucket path.
+Users can also specify a timezone for the bucketer to use when formatting the bucket path. A new bucket will be created
 whenever a new date is encountered. For example, if you have a pattern that contains minutes as the
 finest granularity you will get a new bucket every minute. Each bucket is itself a directory that
 contains several part files: each parallel instance of the sink will create its own part file and
@@ -105,7 +106,7 @@ Example:
 DataStream<Tuple2<IntWritable,Text>> input = ...;
 
 BucketingSink<String> sink = new BucketingSink<String>("/base/path");
-sink.setBucketer(new DateTimeBucketer<String>("yyyy-MM-dd--HHmm"));
+sink.setBucketer(new DateTimeBucketer<String>("yyyy-MM-dd--HHmm", ZoneId.of("America/Los_Angeles")));
 sink.setWriter(new SequenceFileWriter<IntWritable, Text>());
 sink.setBatchSize(1024 * 1024 * 400); // this is 400 MB,
 sink.setBatchRolloverInterval(20 * 60 * 1000); // this is 20 mins
@@ -119,7 +120,7 @@ input.addSink(sink);
 val input: DataStream[Tuple2[IntWritable, Text]] = ...
 
 val sink = new BucketingSink[String]("/base/path")
-sink.setBucketer(new DateTimeBucketer[String]("yyyy-MM-dd--HHmm"))
+sink.setBucketer(new DateTimeBucketer[String]("yyyy-MM-dd--HHmm", ZoneId.of("America/Los_Angeles")))
 sink.setWriter(new SequenceFileWriter[IntWritable, Text]())
 sink.setBatchSize(1024 * 1024 * 400) // this is 400 MB,
 sink.setBatchRolloverInterval(20 * 60 * 1000); // this is 20 mins
diff --git a/docs/dev/connectors/kafka.md b/docs/dev/connectors/kafka.md
index 47a6651a5de..b9dbd8fe07f 100644
--- a/docs/dev/connectors/kafka.md
+++ b/docs/dev/connectors/kafka.md
@@ -166,6 +166,39 @@ For convenience, Flink provides the following schemas:
     The KeyValue objectNode contains a "key" and "value" field which contain all fields, as well as
     an optional "metadata" field that exposes the offset/partition/topic for this message.
     
+3. `AvroDeserializationSchema` which reads data serialized with the Avro format using a statically provided schema. It can
+    infer the schema from Avro generated classes (`AvroDeserializationSchema.forSpecific(...)`) or it can work with `GenericRecords`
+    with a manually provided schema (with `AvroDeserializationSchema.forGeneric(...)`). This deserialization schema expects that
+    the serialized records DO NOT contain an embedded schema.
+
+    - There is also a version of this schema available that can look up the writer's schema (the schema which was used to write the record) in
+      [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/docs/index.html). Using this deserialization schema,
+      records will be read with the schema retrieved from the Schema Registry and transformed to a statically provided reader schema (provided either through
+      `ConfluentRegistryAvroDeserializationSchema.forGeneric(...)` or `ConfluentRegistryAvroDeserializationSchema.forSpecific(...)`).
+
+    <br>To use this deserialization schema one has to add the following additional dependency:
+    
+<div class="codetabs" markdown="1">
+<div data-lang="AvroDeserializationSchema" markdown="1">
+{% highlight xml %}
+<dependency>
+  <groupId>org.apache.flink</groupId>
+  <artifactId>flink-avro</artifactId>
+  <version>{{site.version }}</version>
+</dependency>
+{% endhighlight %}
+</div>
+<div data-lang="ConfluentRegistryAvroDeserializationSchema" markdown="1">
+{% highlight xml %}
+<dependency>
+  <groupId>org.apache.flink</groupId>
+  <artifactId>flink-avro-confluent-registry</artifactId>
+  <version>{{site.version }}</version>
+</dependency>
+{% endhighlight %}
+</div>
+</div>
+
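
To make the above concrete, here is a minimal, hedged sketch of consuming `GenericRecord`s with the Confluent Schema Registry variant. The broker address, consumer group, topic name, reader schema and registry URL are assumptions, and `FlinkKafkaConsumer011` merely stands in for whichever versioned consumer matches your Kafka setup:

{% highlight java %}
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;

import java.util.Properties;

public class ConfluentAvroKafkaSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.setProperty("group.id", "avro-example");            // assumed consumer group

        // Reader schema for the GenericRecords; replace with your own Avro schema.
        Schema schema = new Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"Event\",\"fields\":[{\"name\":\"data\",\"type\":\"string\"}]}");

        // The writer's schema of each record is looked up in the Confluent Schema Registry
        // at the given URL; the record is then read into the statically provided reader schema.
        DataStream<GenericRecord> stream = env.addSource(new FlinkKafkaConsumer011<>(
            "events",                                             // assumed topic name
            ConfluentRegistryAvroDeserializationSchema.forGeneric(schema, "http://localhost:8081"),
            props));

        stream.print();
        env.execute("Confluent registry Avro consumption sketch");
    }
}
{% endhighlight %}
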
 When encountering a corrupted message that cannot be deserialized for any reason, there
 are two options - either throwing an exception from the `deserialize(...)` method
 which will cause the job to fail and be restarted, or returning `null` to allow
diff --git a/docs/dev/connectors/streamfile_sink.md b/docs/dev/connectors/streamfile_sink.md
new file mode 100644
index 00000000000..aea66c3cc48
--- /dev/null
+++ b/docs/dev/connectors/streamfile_sink.md
@@ -0,0 +1,123 @@
+---
+title: "Streaming File Sink"
+nav-title: Streaming File Sink
+nav-parent_id: connectors
+nav-pos: 5
+---
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+This connector provides a Sink that writes partitioned files to filesystems
+supported by the Flink `FileSystem` abstraction. Since in streaming the input
+is potentially infinite, the streaming file sink writes data into buckets. The
+bucketing behaviour is configurable but a useful default is time-based
+bucketing where we start writing a new bucket every hour and thus get
+individual files that each contain a part of the infinite output stream.
+
+Within a bucket, we further split the output into smaller part files based on a
+rolling policy. This is useful to prevent individual bucket files from getting
+too big. This is also configurable but the default policy rolls files based on
+file size and a timeout, i.e., if no new data was written to a part file.
+
+The `StreamingFileSink` supports both row-wise encoding formats and
+bulk-encoding formats, such as [Apache Parquet](http://parquet.apache.org).
+
+#### Using Row-encoded Output Formats
+
+The only required configurations are the base path where we want to output our
+data and an
+[Encoder]({{ site.javadocs_baseurl }}/api/java/org/apache/flink/api/common/serialization/Encoder.html)
+that is used for serializing records to the `OutputStream` for each file.
+
+Basic usage thus looks like this:
+
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+import org.apache.flink.api.common.serialization.Encoder;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+
+DataStream<String> input = ...;
+
+final StreamingFileSink<String> sink = StreamingFileSink
+	.forRowFormat(new Path(outputPath), (Encoder<String>) (element, stream) -> {
+		PrintStream out = new PrintStream(stream);
+		out.println(element);
+	})
+	.build();
+
+input.addSink(sink);
+
+{% endhighlight %}
+</div>
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+import org.apache.flink.api.common.serialization.Encoder
+import org.apache.flink.core.fs.Path
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
+
+val input: DataStream[String] = ...
+
+val sink: StreamingFileSink[String] = StreamingFileSink
+	.forRowFormat(new Path(outputPath), (element, stream) => {
+		val out = new PrintStream(stream)
+		out.println(element)
+	})
+	.build()
+
+input.addSink(sink)
+
+{% endhighlight %}
+</div>
+</div>
+
+This will create a streaming sink that creates hourly buckets and uses a
+default rolling policy. The default bucket assigner is
+[DateTimeBucketAssigner]({{ site.javadocs_baseurl }}/api/java/org/apache/flink/streaming/api/functions/sink/filesystem/bucketassigners/DateTimeBucketAssigner.html)
+and the default rolling policy is
+[DefaultRollingPolicy]({{ site.javadocs_baseurl }}/api/java/org/apache/flink/streaming/api/functions/sink/filesystem/rollingpolicies/DefaultRollingPolicy.html).
+You can specify a custom
+[BucketAssigner]({{ site.javadocs_baseurl }}/api/java/org/apache/flink/streaming/api/functions/sink/filesystem/BucketAssigner.html)
+and
+[RollingPolicy]({{ site.javadocs_baseurl }}/api/java/org/apache/flink/streaming/api/functions/sink/filesystem/RollingPolicy.html)
+on the sink builder. Please check out the JavaDoc for
+[StreamingFileSink]({{ site.javadocs_baseurl }}/api/java/org/apache/flink/streaming/api/functions/sink/filesystem/StreamingFileSink.html)
+for more configuration options and more documentation about the workings and
+interactions of bucket assigners and rolling policies.
+
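+As a rough sketch of how this might look (the builder methods `withBucketAssigner(...)` and `withRollingPolicy(...)` and the `DateTimeBucketAssigner`/`DefaultRollingPolicy` constructors are taken from the linked JavaDoc and should be treated as assumptions, not a verbatim API):
+
+{% highlight java %}
+import org.apache.flink.api.common.serialization.SimpleStringEncoder;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
+import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
+
+final StreamingFileSink<String> sink = StreamingFileSink
+    .forRowFormat(new Path(outputPath), new SimpleStringEncoder<String>("UTF-8"))
+    // bucket per minute instead of the hourly default
+    .withBucketAssigner(new DateTimeBucketAssigner<>("yyyy-MM-dd--HH-mm"))
+    // roll part files at 128 MB or after 15 minutes without new data
+    .withRollingPolicy(
+        DefaultRollingPolicy.create()
+            .withMaxPartSize(128 * 1024 * 1024)
+            .withInactivityInterval(15 * 60 * 1000)
+            .build())
+    .build();
+{% endhighlight %}
+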
+#### Using Bulk-encoded Output Formats
+
+In the above example we used an `Encoder` that can encode or serialize each
+record individually. The streaming file sink also supports bulk-encoded output
+formats such as [Apache Parquet](http://parquet.apache.org). To use these,
+instead of `StreamingFileSink.forRowFormat()` you would use
+`StreamingFileSink.forBulkFormat()` and specify a `BulkWriter.Factory`.
+
+[ParquetAvroWriters]({{ site.javadocs_baseurl }}/api/java/org/apache/flink/formats/parquet/avro/ParquetAvroWriters.html)
+has static methods for creating a `BulkWriter.Factory` for various types.
+
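+For example, a minimal sketch of a Parquet sink for a POJO written via Avro reflection (the `MyPojo` type and `outputPath` are placeholders for this example):
+
+{% highlight java %}
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.formats.parquet.avro.ParquetAvroWriters;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+
+DataStream<MyPojo> input = ...;
+
+final StreamingFileSink<MyPojo> sink = StreamingFileSink
+    .forBulkFormat(new Path(outputPath), ParquetAvroWriters.forReflectRecord(MyPojo.class))
+    .build();
+
+input.addSink(sink);
+{% endhighlight %}
+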
+<div class="alert alert-info">
+    <b>IMPORTANT:</b> Bulk-encoding formats can only be combined with the
+    `OnCheckpointRollingPolicy`, which rolls the in-progress part file on
+    every checkpoint.
+</div>
+
+{% top %}
diff --git a/docs/dev/java8.md b/docs/dev/java8.md
deleted file mode 100644
index 8e7e6435e6b..00000000000
--- a/docs/dev/java8.md
+++ /dev/null
@@ -1,198 +0,0 @@
----
-title: "Java 8"
-nav-parent_id: api-concepts
-nav-pos: 20
----
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-
-Java 8 introduces several new language features designed for faster and clearer coding. With the most important feature,
-the so-called "Lambda Expressions", Java 8 opens the door to functional programming. Lambda Expressions allow for implementing and
-passing functions in a straightforward way without having to declare additional (anonymous) classes.
-
-The newest version of Flink supports the usage of Lambda Expressions for all operators of the Java API.
-This document shows how to use Lambda Expressions and describes current limitations. For a general introduction to the
-Flink API, please refer to the [Programming Guide]({{ site.baseurl }}/dev/api_concepts.html)
-
-* TOC
-{:toc}
-
-### Examples
-
-The following example illustrates how to implement a simple, inline `map()` function that squares its input using a Lambda Expression.
-The types of input `i` and output parameters of the `map()` function need not to be declared as they are inferred by the Java 8 compiler.
-
-{% highlight java %}
-env.fromElements(1, 2, 3)
-// returns the squared i
-.map(i -> i*i)
-.print();
-{% endhighlight %}
-
-The next two examples show different implementations of a function that uses a `Collector` for output.
-Functions, such as `flatMap()`, require an output type (in this case `String`) to be defined for the `Collector` in order to be type-safe.
-If the `Collector` type can not be inferred from the surrounding context, it needs to be declared in the Lambda Expression's parameter list manually.
-Otherwise the output will be treated as type `Object` which can lead to undesired behaviour.
-
-{% highlight java %}
-DataSet<Integer> input = env.fromElements(1, 2, 3);
-
-// collector type must be declared
-input.flatMap((Integer number, Collector<String> out) -> {
-    StringBuilder builder = new StringBuilder();
-    for(int i = 0; i < number; i++) {
-        builder.append("a");
-        out.collect(builder.toString());
-    }
-})
-// returns (on separate lines) "a", "a", "aa", "a", "aa", "aaa"
-.print();
-{% endhighlight %}
-
-{% highlight java %}
-DataSet<Integer> input = env.fromElements(1, 2, 3);
-
-// collector type must not be declared, it is inferred from the type of the dataset
-DataSet<String> manyALetters = input.flatMap((number, out) -> {
-    StringBuilder builder = new StringBuilder();
-    for(int i = 0; i < number; i++) {
-       builder.append("a");
-       out.collect(builder.toString());
-    }
-});
-
-// returns (on separate lines) "a", "a", "aa", "a", "aa", "aaa"
-manyALetters.print();
-{% endhighlight %}
-
-The following code demonstrates a word count which makes extensive use of Lambda Expressions.
-
-{% highlight java %}
-DataSet<String> input = env.fromElements("Please count", "the words", "but not this");
-
-// filter out strings that contain "not"
-input.filter(line -> !line.contains("not"))
-// split each line by space
-.map(line -> line.split(" "))
-// emit a pair <word,1> for each array element
-.flatMap((String[] wordArray, Collector<Tuple2<String, Integer>> out)
-    -> Arrays.stream(wordArray).forEach(t -> out.collect(new Tuple2<>(t, 1)))
-    )
-// group and sum up
-.groupBy(0).sum(1)
-// print
-.print();
-{% endhighlight %}
-
-### Compiler Limitations
-Currently, Flink only supports jobs containing Lambda Expressions completely if they are **compiled with the Eclipse JDT compiler contained in Eclipse Luna 4.4.2 (and above)**.
-
-Only the Eclipse JDT compiler preserves the generic type information necessary to use the entire Lambda Expressions feature type-safely.
-Other compilers such as the OpenJDK's and Oracle JDK's `javac` throw away all generic parameters related to Lambda Expressions. This means that types such as `Tuple2<String, Integer>` or `Collector<String>` declared as a Lambda function input or output parameter will be pruned to `Tuple2` or `Collector` in the compiled `.class` files, which is too little information for the Flink compiler.
-
-How to compile a Flink job that contains Lambda Expressions with the JDT compiler will be covered in the next section.
-
-However, it is possible to implement functions such as `map()` or `filter()` with Lambda Expressions in Java 8 compilers other than the Eclipse JDT compiler as long as the function has no `Collector`s or `Iterable`s *and* only if the function handles unparameterized types such as `Integer`, `Long`, `String`, `MyOwnClass` (types without Generics!).
-
-#### Compile Flink jobs with the Eclipse JDT compiler and Maven
-
-If you are using the Eclipse IDE, you can run and debug your Flink code within the IDE without any problems after some configuration steps. The Eclipse IDE by default compiles its Java sources with the Eclipse JDT compiler. The next section describes how to configure the Eclipse IDE.
-
-If you are using a different IDE such as IntelliJ IDEA or you want to package your Jar-File with Maven to run your job on a cluster, you need to modify your project's `pom.xml` file and build your program with Maven. The [quickstart]({{site.baseurl}}/quickstart/setup_quickstart.html) contains preconfigured Maven projects which can be used for new projects or as a reference. Uncomment the mentioned lines in your generated quickstart `pom.xml` file if you want to use Java 8 with Lambda Expressions.
-
-Alternatively, you can manually insert the following lines to your Maven `pom.xml` file. Maven will then use the Eclipse JDT compiler for compilation.
-
-{% highlight xml %}
-<!-- put these lines under "project/build/pluginManagement/plugins" of your pom.xml -->
-
-<plugin>
-    <!-- Use compiler plugin with tycho as the adapter to the JDT compiler. -->
-    <artifactId>maven-compiler-plugin</artifactId>
-    <configuration>
-        <source>1.8</source>
-        <target>1.8</target>
-        <compilerId>jdt</compilerId>
-    </configuration>
-    <dependencies>
-        <!-- This dependency provides the implementation of compiler "jdt": -->
-        <dependency>
-            <groupId>org.eclipse.tycho</groupId>
-            <artifactId>tycho-compiler-jdt</artifactId>
-            <version>0.21.0</version>
-        </dependency>
-    </dependencies>
-</plugin>
-{% endhighlight %}
-
-If you are using Eclipse for development, the m2e plugin might complain about the inserted lines above and marks your `pom.xml` as invalid. If so, insert the following lines to your `pom.xml`.
-
-{% highlight xml %}
-<!-- put these lines under "project/build/pluginManagement/plugins/plugin[groupId="org.eclipse.m2e", artifactId="lifecycle-mapping"]/configuration/lifecycleMappingMetadata/pluginExecutions" of your pom.xml -->
-
-<pluginExecution>
-    <pluginExecutionFilter>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <versionRange>[3.1,)</versionRange>
-        <goals>
-            <goal>testCompile</goal>
-            <goal>compile</goal>
-        </goals>
-    </pluginExecutionFilter>
-    <action>
-        <ignore></ignore>
-    </action>
-</pluginExecution>
-{% endhighlight %}
-
-#### Run and debug Flink jobs within the Eclipse IDE
-
-First of all, make sure you are running a current version of Eclipse IDE (4.4.2 or later). Also make sure that you have a Java 8 Runtime Environment (JRE) installed in Eclipse IDE (`Window` -> `Preferences` -> `Java` -> `Installed JREs`).
-
-Create/Import your Eclipse project.
-
-If you are using Maven, you also need to change the Java version in your `pom.xml` for the `maven-compiler-plugin`. Otherwise right click the `JRE System Library` section of your project and open the `Properties` window in order to switch to a Java 8 JRE (or above) that supports Lambda Expressions.
-
-The Eclipse JDT compiler needs a special compiler flag in order to store type information in `.class` files. Open the JDT configuration file at `{project directory}/.settings/org.eclipse.jdt.core.prefs` with your favorite text editor and add the following line:
-
-{% highlight plain %}
-org.eclipse.jdt.core.compiler.codegen.lambda.genericSignature=generate
-{% endhighlight %}
-
-If not already done, also modify the Java versions of the following properties to `1.8` (or above):
-
-{% highlight plain %}
-org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
-org.eclipse.jdt.core.compiler.compliance=1.8
-org.eclipse.jdt.core.compiler.source=1.8
-{% endhighlight %}
-
-After you have saved the file, perform a complete project refresh in Eclipse IDE.
-
-If you are using Maven, right click your Eclipse project and select `Maven` -> `Update Project...`.
-
-You have configured everything correctly, if the following Flink program runs without exceptions:
-
-{% highlight java %}
-final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
-env.fromElements(1, 2, 3).map((in) -> new Tuple1<String>(" " + in)).print();
-env.execute();
-{% endhighlight %}
-
-{% top %}
diff --git a/docs/dev/java_lambdas.md b/docs/dev/java_lambdas.md
new file mode 100644
index 00000000000..4b306ac455d
--- /dev/null
+++ b/docs/dev/java_lambdas.md
@@ -0,0 +1,138 @@
+---
+title: "Java Lambda Expressions"
+nav-parent_id: api-concepts
+nav-pos: 20
+---
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+Java 8 introduced several new language features designed for faster and clearer coding. With the most important feature,
+the so-called "Lambda Expressions", it opened the door to functional programming. Lambda expressions allow for implementing and
+passing functions in a straightforward way without having to declare additional (anonymous) classes.
+
+<span class="label label-danger">Attention</span> Flink supports the usage of lambda expressions for all operators of the Java API, however, whenever a lambda expression uses Java generics you need to declare type information *explicitly*. 
+
+This document shows how to use lambda expressions and describes current limitations. For a general introduction to the
+Flink API, please refer to the [Programming Guide]({{ site.baseurl }}/dev/api_concepts.html)
+
+### Examples and Limitations
+
+The following example illustrates how to implement a simple, inline `map()` function that squares its input using a lambda expression.
+The types of input `i` and output parameters of the `map()` function need not be declared, as they are inferred by the Java compiler.
+
+{% highlight java %}
+env.fromElements(1, 2, 3)
+// returns the squared i
+.map(i -> i*i)
+.print();
+{% endhighlight %}
+
+Flink can automatically extract the result type information from the implementation of the method signature `OUT map(IN value)` because `OUT` is not generic but `Integer`.
+
+Unfortunately, functions such as `flatMap()` with a signature `void flatMap(IN value, Collector<OUT> out)` are compiled into `void flatMap(IN value, Collector out)` by the Java compiler. This makes it impossible for Flink to infer the type information for the output type automatically.
+
+Flink will most likely throw an exception similar to the following:
+
+{% highlight plain %}
+org.apache.flink.api.common.functions.InvalidTypesException: The generic type parameters of 'Collector' are missing.
+    In many cases lambda methods don't provide enough information for automatic type extraction when Java generics are involved.
+    An easy workaround is to use an (anonymous) class instead that implements the 'org.apache.flink.api.common.functions.FlatMapFunction' interface.
+    Otherwise the type has to be specified explicitly using type information.
+{% endhighlight %}
+
+In this case, the type information needs to be *specified explicitly*; otherwise the output will be treated as type `Object`, which leads to inefficient serialization.
+
+{% highlight java %}
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.DataSet;
+import org.apache.flink.util.Collector;
+
+DataSet<Integer> input = env.fromElements(1, 2, 3);
+
+// collector type must be declared
+input.flatMap((Integer number, Collector<String> out) -> {
+    StringBuilder builder = new StringBuilder();
+    for(int i = 0; i < number; i++) {
+        builder.append("a");
+        out.collect(builder.toString());
+    }
+})
+// provide type information explicitly
+.returns(Types.STRING)
+// prints "a", "a", "aa", "a", "aa", "aaa"
+.print();
+{% endhighlight %}
+
+Similar problems occur when using a `map()` function with a generic return type. A method signature `Tuple2<Integer, Integer> map(Integer value)` is erased to `Tuple2 map(Integer value)` in the example below.
+
+{% highlight java %}
+import org.apache.flink.api.common.functions.MapFunction;
+import org.apache.flink.api.java.tuple.Tuple2;
+
+env.fromElements(1, 2, 3)
+    .map(i -> Tuple2.of(i, i))    // no information about fields of Tuple2
+    .print();
+{% endhighlight %}
+
+In general, those problems can be solved in multiple ways:
+
+{% highlight java %}
+import org.apache.flink.api.common.functions.MapFunction;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.tuple.Tuple2;
+
+// use the explicit ".returns(...)"
+env.fromElements(1, 2, 3)
+    .map(i -> Tuple2.of(i, i))
+    .returns(Types.TUPLE(Types.INT, Types.INT))
+    .print();
+
+// use a class instead
+env.fromElements(1, 2, 3)
+    .map(new MyTuple2Mapper())
+    .print();
+
+public static class MyTuple2Mapper implements MapFunction<Integer, Tuple2<Integer, Integer>> {
+    @Override
+    public Tuple2<Integer, Integer> map(Integer i) {
+        return Tuple2.of(i, i);
+    }
+}
+
+// use an anonymous class instead
+env.fromElements(1, 2, 3)
+    .map(new MapFunction<Integer, Tuple2<Integer, Integer>>() {
+        @Override
+        public Tuple2<Integer, Integer> map(Integer i) {
+            return Tuple2.of(i, i);
+        }
+    })
+    .print();
+
+// or in this example use a tuple subclass instead
+env.fromElements(1, 2, 3)
+    .map(i -> new DoubleTuple(i, i))
+    .print();
+
+public static class DoubleTuple extends Tuple2<Integer, Integer> {
+    public DoubleTuple(int f0, int f1) {
+        this.f0 = f0;
+        this.f1 = f1;
+    }
+}
+{% endhighlight %}
\ No newline at end of file
diff --git a/docs/dev/libs/cep.md b/docs/dev/libs/cep.md
index 6723e715e82..ad321bf71b5 100644
--- a/docs/dev/libs/cep.md
+++ b/docs/dev/libs/cep.md
@@ -250,21 +250,10 @@ For a pattern named `start`, the following are valid quantifiers:
 
 #### Conditions
 
-At every pattern, and to go from one pattern to the next, you can specify additional **conditions**.
-You can relate these conditions to:
-
- 1. A [property of the incoming event](#conditions-on-properties), e.g. its value should be larger than 5,
- or larger than the average value of the previously accepted events.
-
- 2. The [contiguity of the matching events](#conditions-on-contiguity), e.g. detect pattern `a,b,c` without
- non-matching events between any matching ones.
-
-The latter refers to "looping" patterns, *i.e.* patterns that can accept more than one event, e.g. the `b+` in `a b+ c`,
-which searches for one or more `b`'s.
-
-##### Conditions on Properties
-
-You can specify conditions on the event properties via the `pattern.where()`, `pattern.or()` or the `pattern.until()` method. These can be either `IterativeCondition`s or `SimpleCondition`s.
+For every pattern you can specify a condition that an incoming event has to meet in order to be "accepted" into the pattern, e.g. its value should be larger than 5,
+or larger than the average value of the previously accepted events.
+You can specify conditions on the event properties via the `pattern.where()`, `pattern.or()` or `pattern.until()` methods.
+These can be either `IterativeCondition`s or `SimpleCondition`s.
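+
+For example, a simple condition on an event property can be attached like this (a sketch using the `Event` type, its `getName()` accessor, and a `start` pattern as in the examples further down this page):
+
+{% highlight java %}
+start.where(new SimpleCondition<Event>() {
+    @Override
+    public boolean filter(Event value) throws Exception {
+        return value.getName().startsWith("foo");
+    }
+});
+{% endhighlight %}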
 
 **Iterative Conditions:** This is the most general type of condition. This is how you can specify a condition that
 accepts subsequent events based on properties of the previously accepted events or a statistic over a subset of them.
@@ -396,36 +385,6 @@ To better understand it, have a look at the following example. Given
 
 As you can see `{a1 a2 a3}` or `{a2 a3}` are not returned due to the stop condition.
 
-##### Conditions on Contiguity
-
-FlinkCEP supports the following forms of contiguity between events:
-
- 1. **Strict Contiguity**: Expects all matching events to appear strictly one after the other, without any non-matching events in-between.
-
- 2. **Relaxed Contiguity**: Ignores non-matching events appearing in-between the matching ones.
-
- 3. **Non-Deterministic Relaxed Contiguity**: Further relaxes contiguity, allowing additional matches
- that ignore some matching events.
-
-To illustrate the above with an example, a pattern sequence `"a+ b"` (one or more `"a"`'s followed by a `"b"`) with
-input `"a1", "c", "a2", "b"` will have the following results:
-
- 1. **Strict Contiguity**: `{a2 b}` -- the `"c"` after `"a1"` causes `"a1"` to be discarded.
-
- 2. **Relaxed Contiguity**: `{a1 b}` and `{a1 a2 b}` -- `"c"` is ignored.
-
- 3. **Non-Deterministic Relaxed Contiguity**: `{a1 b}`, `{a2 b}`, and `{a1 a2 b}`.
-
-For looping patterns (e.g. `oneOrMore()` and `times()`) the default is *relaxed contiguity*. If you want
-strict contiguity, you have to explicitly specify it by using the `consecutive()` call, and if you want
-*non-deterministic relaxed contiguity* you can use the `allowCombinations()` call.
-
-{% warn Attention %}
-In this section we are talking about contiguity *within* a single looping pattern, and the
-`consecutive()` and `allowCombinations()` calls need to be understood in that context. Later when looking at
-[Combining Patterns](#combining-patterns) we'll discuss other calls, such as `next()` and `followedBy()`,
-that are used to specify contiguity conditions *between* patterns.
-
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 <table class="table table-bordered">
@@ -565,74 +524,6 @@ pattern.oneOrMore().greedy();
 {% endhighlight %}
           </td>
        </tr>
-       <tr>
-          <td><strong>consecutive()</strong><a name="consecutive_java"></a></td>
-          <td>
-              <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes strict contiguity between the matching
-              events, i.e. any non-matching element breaks the match (as in <code>next()</code>).</p>
-              <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
-
-              <p>E.g. a pattern like:</p>
-{% highlight java %}
-Pattern.<Event>begin("start").where(new SimpleCondition<Event>() {
-  @Override
-  public boolean filter(Event value) throws Exception {
-    return value.getName().equals("c");
-  }
-})
-.followedBy("middle").where(new SimpleCondition<Event>() {
-  @Override
-  public boolean filter(Event value) throws Exception {
-    return value.getName().equals("a");
-  }
-}).oneOrMore().consecutive()
-.followedBy("end1").where(new SimpleCondition<Event>() {
-  @Override
-  public boolean filter(Event value) throws Exception {
-    return value.getName().equals("b");
-  }
-});
-{% endhighlight %}
-              <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
-
-              <p>with consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}</p>
-              <p>without consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
-          </td>
-       </tr>
-       <tr>
-       <td><strong>allowCombinations()</strong><a name="allow_comb_java"></a></td>
-       <td>
-              <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes non-deterministic relaxed contiguity
-              between the matching events (as in <code>followedByAny()</code>).</p>
-              <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
-
-              <p>E.g. a pattern like:</p>
-{% highlight java %}
-Pattern.<Event>begin("start").where(new SimpleCondition<Event>() {
-  @Override
-  public boolean filter(Event value) throws Exception {
-    return value.getName().equals("c");
-  }
-})
-.followedBy("middle").where(new SimpleCondition<Event>() {
-  @Override
-  public boolean filter(Event value) throws Exception {
-    return value.getName().equals("a");
-  }
-}).oneOrMore().allowCombinations()
-.followedBy("end1").where(new SimpleCondition<Event>() {
-  @Override
-  public boolean filter(Event value) throws Exception {
-    return value.getName().equals("b");
-  }
-});
-{% endhighlight %}
-               <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
-
-               <p>with combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A3 B}, {C A1 A4 B}, {C A1 A2 A3 B}, {C A1 A2 A4 B}, {C A1 A3 A4 B}, {C A1 A2 A3 A4 B}</p>
-               <p>without combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
-       </td>
-       </tr>
   </tbody>
 </table>
 </div>
@@ -757,52 +648,9 @@ pattern.oneOrMore().greedy()
 {% endhighlight %}
           </td>
        </tr>
-       <tr>
-          <td><strong>consecutive()</strong><a name="consecutive_scala"></a></td>
-          <td>
-            <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes strict contiguity between the matching
-                          events, i.e. any non-matching element breaks the match (as in <code>next()</code>).</p>
-                          <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
-
-      <p>E.g. a pattern like:</p>
-{% highlight scala %}
-Pattern.begin("start").where(_.getName().equals("c"))
-  .followedBy("middle").where(_.getName().equals("a"))
-                       .oneOrMore().consecutive()
-  .followedBy("end1").where(_.getName().equals("b"))
-{% endhighlight %}
-
-            <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
-
-                          <p>with consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}</p>
-                          <p>without consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
-          </td>
-       </tr>
-       <tr>
-              <td><strong>allowCombinations()</strong><a name="allow_comb_java"></a></td>
-              <td>
-                <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes non-deterministic relaxed contiguity
-                     between the matching events (as in <code>followedByAny()</code>).</p>
-                     <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
-
-      <p>E.g. a pattern like:</p>
-{% highlight scala %}
-Pattern.begin("start").where(_.getName().equals("c"))
-  .followedBy("middle").where(_.getName().equals("a"))
-                       .oneOrMore().allowCombinations()
-  .followedBy("end1").where(_.getName().equals("b"))
-{% endhighlight %}
-
-                      <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
-
-                      <p>with combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A3 B}, {C A1 A4 B}, {C A1 A2 A3 B}, {C A1 A2 A4 B}, {C A1 A3 A4 B}, {C A1 A2 A3 A4 B}</p>
-                      <p>without combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
-              </td>
-              </tr>
   </tbody>
 </table>
 </div>
-
 </div>
 
 ### Combining Patterns
@@ -827,9 +675,16 @@ val start : Pattern[Event, _] = Pattern.begin("start")
 </div>
 
 Next, you can append more patterns to your pattern sequence by specifying the desired *contiguity conditions* between
-them. In the [previous section](#conditions-on-contiguity) we described the different contiguity modes supported by
-Flink, namely *strict*, *relaxed*, and *non-deterministic relaxed*, and how to apply them in looping patterns. To apply
-them between consecutive patterns, you can use:
+them. FlinkCEP supports the following forms of contiguity between events:
+
+ 1. **Strict Contiguity**: Expects all matching events to appear strictly one after the other, without any non-matching events in-between.
+
+ 2. **Relaxed Contiguity**: Ignores non-matching events appearing in-between the matching ones.
+
+ 3. **Non-Deterministic Relaxed Contiguity**: Further relaxes contiguity, allowing additional matches
+ that ignore some matching events. 
+ 
+To apply them between consecutive patterns, you can use:
 
 1. `next()`, for *strict*,
 2. `followedBy()`, for *relaxed*, and
@@ -919,6 +774,164 @@ next.within(Time.seconds(10))
 </div>
 </div>
 
+#### Contiguity within looping patterns
+
+You can apply the same contiguity condition as discussed in the previous [section](#combining-patterns) within a looping pattern.
+The contiguity will be applied between elements accepted into such a pattern.
+To illustrate the above with an example, a pattern sequence `"a b+ c"` (`"a"` followed by any (non-deterministic relaxed) sequence of one or more `"b"`'s followed by a `"c"`) with
+input `"a", "b1", "d1", "b2", "d2", "b3" "c"` will have the following results:
+
+ 1. **Strict Contiguity**: `{a b3 c}` -- the `"d1"` after `"b1"` causes `"b1"` to be discarded, the same happens for `"b2"` because of `"d2"`.
+
+ 2. **Relaxed Contiguity**: `{a b1 c}`, `{a b1 b2 c}`, `{a b1 b2 b3 c}`, `{a b2 c}`, `{a b2 b3 c}`, `{a b3 c}` - `"d"`'s are ignored.
+
+ 3. **Non-Deterministic Relaxed Contiguity**: `{a b1 c}`, `{a b1 b2 c}`, `{a b1 b3 c}`, `{a b1 b2 b3 c}`, `{a b2 c}`, `{a b2 b3 c}`, `{a b3 c}` -
+    notice the `{a b1 b3 c}`, which is the result of relaxing contiguity between `"b"`'s.
+
+For looping patterns (e.g. `oneOrMore()` and `times()`) the default is *relaxed contiguity*. If you want
+strict contiguity, you have to explicitly specify it by using the `consecutive()` call, and if you want
+*non-deterministic relaxed contiguity* you can use the `allowCombinations()` call.
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+<table class="table table-bordered">
+    <thead>
+        <tr>
+            <th class="text-left" style="width: 25%">Pattern Operation</th>
+            <th class="text-center">Description</th>
+        </tr>
+    </thead>
+    <tbody>
+       <tr>
+          <td><strong>consecutive()</strong><a name="consecutive_java"></a></td>
+          <td>
+              <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes strict contiguity between the matching
+              events, i.e. any non-matching element breaks the match (as in <code>next()</code>).</p>
+              <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
+
+              <p>E.g. a pattern like:</p>
+{% highlight java %}
+Pattern.<Event>begin("start").where(new SimpleCondition<Event>() {
+  @Override
+  public boolean filter(Event value) throws Exception {
+    return value.getName().equals("c");
+  }
+})
+.followedBy("middle").where(new SimpleCondition<Event>() {
+  @Override
+  public boolean filter(Event value) throws Exception {
+    return value.getName().equals("a");
+  }
+}).oneOrMore().consecutive()
+.followedBy("end1").where(new SimpleCondition<Event>() {
+  @Override
+  public boolean filter(Event value) throws Exception {
+    return value.getName().equals("b");
+  }
+});
+{% endhighlight %}
+              <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
+
+              <p>with consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}</p>
+              <p>without consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
+          </td>
+       </tr>
+       <tr>
+       <td><strong>allowCombinations()</strong><a name="allow_comb_java"></a></td>
+       <td>
+              <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes non-deterministic relaxed contiguity
+              between the matching events (as in <code>followedByAny()</code>).</p>
+              <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
+
+              <p>E.g. a pattern like:</p>
+{% highlight java %}
+Pattern.<Event>begin("start").where(new SimpleCondition<Event>() {
+  @Override
+  public boolean filter(Event value) throws Exception {
+    return value.getName().equals("c");
+  }
+})
+.followedBy("middle").where(new SimpleCondition<Event>() {
+  @Override
+  public boolean filter(Event value) throws Exception {
+    return value.getName().equals("a");
+  }
+}).oneOrMore().allowCombinations()
+.followedBy("end1").where(new SimpleCondition<Event>() {
+  @Override
+  public boolean filter(Event value) throws Exception {
+    return value.getName().equals("b");
+  }
+});
+{% endhighlight %}
+               <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
+
+               <p>with combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A3 B}, {C A1 A4 B}, {C A1 A2 A3 B}, {C A1 A2 A4 B}, {C A1 A3 A4 B}, {C A1 A2 A3 A4 B}</p>
+               <p>without combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
+       </td>
+       </tr>
+  </tbody>
+</table>
+</div>
+
+<div data-lang="scala" markdown="1">
+<table class="table table-bordered">
+    <thead>
+        <tr>
+            <th class="text-left" style="width: 25%">Pattern Operation</th>
+            <th class="text-center">Description</th>
+        </tr>
+    </thead>
+    <tbody>
+           <tr>
+              <td><strong>consecutive()</strong><a name="consecutive_scala"></a></td>
+              <td>
+                <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes strict contiguity between the matching
+                              events, i.e. any non-matching element breaks the match (as in <code>next()</code>).</p>
+                              <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
+    
+          <p>E.g. a pattern like:</p>
+{% highlight scala %}
+Pattern.begin("start").where(_.getName().equals("c"))
+  .followedBy("middle").where(_.getName().equals("a"))
+                       .oneOrMore().consecutive()
+  .followedBy("end1").where(_.getName().equals("b"))
+{% endhighlight %}
+    
+                <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
+    
+                              <p>with consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}</p>
+                              <p>without consecutive applied: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
+              </td>
+           </tr>
+           <tr>
+                  <td><strong>allowCombinations()</strong><a name="allow_comb_java"></a></td>
+                  <td>
+                    <p>Works in conjunction with <code>oneOrMore()</code> and <code>times()</code> and imposes non-deterministic relaxed contiguity
+                         between the matching events (as in <code>followedByAny()</code>).</p>
+                         <p>If not applied a relaxed contiguity (as in <code>followedBy()</code>) is used.</p>
+    
+          <p>E.g. a pattern like:</p>
+{% highlight scala %}
+Pattern.begin("start").where(_.getName().equals("c"))
+  .followedBy("middle").where(_.getName().equals("a"))
+                       .oneOrMore().allowCombinations()
+  .followedBy("end1").where(_.getName().equals("b"))
+{% endhighlight %}
+    
+                          <p>Will generate the following matches for an input sequence: C D A1 A2 A3 D A4 B</p>
+    
+                          <p>with combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A3 B}, {C A1 A4 B}, {C A1 A2 A3 B}, {C A1 A2 A4 B}, {C A1 A3 A4 B}, {C A1 A2 A3 A4 B}</p>
+                          <p>without combinations enabled: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}, {C A1 A2 A3 A4 B}</p>
+                  </td>
+                  </tr>
+  </tbody>
+</table>
+</div>
+</div>
+
+### Groups of patterns
+
 It's also possible to define a pattern sequence as the condition for `begin`, `followedBy`, `followedByAny` and
 `next`. The pattern sequence will be considered as the matching condition logically and a `GroupPattern` will be
 returned and it is possible to apply `oneOrMore()`, `times(#ofTimes)`, `times(#fromTimes, #toTimes)`, `optional()`,
diff --git a/docs/dev/scala_shell.md b/docs/dev/scala_shell.md
index b8d2b2c3b9b..d236430ad57 100644
--- a/docs/dev/scala_shell.md
+++ b/docs/dev/scala_shell.md
@@ -175,7 +175,7 @@ Starts Flink scala shell connecting to a yarn cluster
   -n arg | --container arg
         Number of YARN container to allocate (= Number of TaskManagers)
   -jm arg | --jobManagerMemory arg
-        Memory for JobManager container [in MB]
+        Memory for JobManager container with optional unit (default: MB)
   -nm <value> | --name <value>
         Set a custom name for the application on YARN
   -qu <arg> | --queue <arg>
@@ -183,7 +183,7 @@ Starts Flink scala shell connecting to a yarn cluster
   -s <arg> | --slots <arg>
         Number of slots per TaskManager
   -tm <arg> | --taskManagerMemory <arg>
-        Memory per TaskManager container [in MB]
+        Memory per TaskManager container with optional unit (default: MB)
   -a <path/to/jar> | --addclasspath <path/to/jar>
         Specifies additional jars to be used in Flink
   --configDir <value>
diff --git a/docs/dev/stream/operators/joining.md b/docs/dev/stream/operators/joining.md
new file mode 100644
index 00000000000..b95aaddbbaa
--- /dev/null
+++ b/docs/dev/stream/operators/joining.md
@@ -0,0 +1,282 @@
+---
+title: "Joining"
+nav-id: streaming_joins
+nav-show_overview: true
+nav-parent_id: streaming_operators
+nav-pos: 11
+---
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+* toc
+{:toc}
+
+# Window Join
+A window join joins the elements of two streams that share a common key and lie in the same window. These windows can be defined by using a [window assigner]({{ site.baseurl}}/dev/stream/operators/windows.html#window-assigners) and are evaluated on elements from both of the streams.
+
+The elements from both sides are then passed to a user-defined `JoinFunction` or `FlatJoinFunction` where the user can emit results that meet the join criteria.
+
+The general usage can be summarized as follows:
+
+{% highlight java %}
+stream.join(otherStream)
+    .where(<KeySelector>)
+    .equalTo(<KeySelector>)
+    .window(<WindowAssigner>)
+    .apply(<JoinFunction>)
+{% endhighlight %}
+
+Some notes on semantics:
+- The creation of pairwise combinations of elements of the two streams behaves like an inner-join, meaning elements from one stream will not be emitted if they don't have a corresponding element from the other stream to be joined with.
+- Those elements that do get joined will have as their timestamp the largest timestamp that still lies in the respective window. For example a window with `[5, 10)` as its boundaries would result in the joined elements having 9 as their timestamp.
+
+In the following section we are going to give an overview of how different kinds of window joins behave using some exemplary scenarios.
+
+## Tumbling Window Join
+When performing a tumbling window join, all elements with a common key and a common tumbling window are joined as pairwise combinations and passed on to a `JoinFunction` or `FlatJoinFunction`. Because this behaves like an inner join, elements of one stream that do not have elements from another stream in their tumbling window are not emitted!
+
+<img src="{{ site.baseurl }}/fig/tumbling-window-join.svg" class="center" style="width: 80%;" />
+
+As illustrated in the figure, we define a tumbling window with the size of 2 milliseconds, which results in windows of the form `[0,1], [2,3], ...`. The image shows the pairwise combinations of all elements in each window which will be passed on to the `JoinFunction`. Note that in the tumbling window `[6,7]` nothing is emitted because no elements exist in the green stream to be joined with the orange elements ⑥ and ⑦.
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+ 
+...
+
+DataStream<Integer> orangeStream = ...
+DataStream<Integer> greenStream = ...
+
+orangeStream.join(greenStream)
+    .where(<KeySelector>)
+    .equalTo(<KeySelector>)
+    .window(TumblingEventTimeWindows.of(Time.milliseconds(2)))
+    .apply (new JoinFunction<Integer, Integer, String> (){
+        @Override
+        public String join(Integer first, Integer second) {
+            return first + "," + second;
+        }
+    });
+{% endhighlight %}
+</div>
+<div data-lang="scala" markdown="1">
+
+{% highlight scala %}
+import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+
+...
+
+val orangeStream: DataStream[Integer] = ...
+val greenStream: DataStream[Integer] = ...
+
+orangeStream.join(greenStream)
+    .where(elem => /* select key */)
+    .equalTo(elem => /* select key */)
+    .window(TumblingEventTimeWindows.of(Time.milliseconds(2)))
+    .apply { (e1, e2) => e1 + "," + e2 }
+{% endhighlight %}
+
+</div>
+</div>
+
+## Sliding Window Join
+When performing a sliding window join, all elements with a common key and common sliding window are joined as pairwise combinations and passed on to the `JoinFunction` or `FlatJoinFunction`. Elements of one stream that do not have elements from the other stream in the current sliding window are not emitted! Note that some elements might be joined in one sliding window but not in another!
+
+<img src="{{ site.baseurl }}/fig/sliding-window-join.svg" class="center" style="width: 80%;" />
+
+In this example we are using sliding windows with a size of two milliseconds and slide them by one millisecond, resulting in the sliding windows `[-1, 0],[0,1],[1,2],[2,3], …`.<!-- TODO: Can -1 actually exist?--> The joined elements below the x-axis are the ones that are passed to the `JoinFunction` for each sliding window. Here you can also see how for example the orange ② is joined with the green ③ in the window `[2,3]`, but is not joined with anything in the window `[1,2]`.
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+
+{% highlight java %}
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+
+...
+
+DataStream<Integer> orangeStream = ...
+DataStream<Integer> greenStream = ...
+
+orangeStream.join(greenStream)
+    .where(<KeySelector>)
+    .equalTo(<KeySelector>)
+    .window(SlidingEventTimeWindows.of(Time.milliseconds(2) /* size */, Time.milliseconds(1) /* slide */))
+    .apply (new JoinFunction<Integer, Integer, String> (){
+        @Override
+        public String join(Integer first, Integer second) {
+            return first + "," + second;
+        }
+    });
+{% endhighlight %}
+</div>
+<div data-lang="scala" markdown="1">
+
+{% highlight scala %}
+import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+
+...
+
+val orangeStream: DataStream[Integer] = ...
+val greenStream: DataStream[Integer] = ...
+
+orangeStream.join(greenStream)
+    .where(elem => /* select key */)
+    .equalTo(elem => /* select key */)
+    .window(SlidingEventTimeWindows.of(Time.milliseconds(2) /* size */, Time.milliseconds(1) /* slide */))
+    .apply { (e1, e2) => e1 + "," + e2 }
+{% endhighlight %}
+</div>
+</div>
+
+## Session Window Join
+When performing a session window join, all elements with the same key that when _"combined"_ fulfill the session criteria are joined in pairwise combinations and passed on to the `JoinFunction` or `FlatJoinFunction`. Again this performs an inner join, so if there is a session window that only contains elements from one stream, no output will be emitted!
+
+<img src="{{ site.baseurl }}/fig/session-window-join.svg" class="center" style="width: 80%;" />
+
+Here we define a session window join where each session is divided by a gap of at least 1ms. There are three sessions, and in the first two sessions the joined elements from both streams are passed to the `JoinFunction`. In the third session there are no elements in the green stream, so ⑧ and ⑨ are not joined!
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+
+{% highlight java %}
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+ 
+...
+
+DataStream<Integer> orangeStream = ...
+DataStream<Integer> greenStream = ...
+
+orangeStream.join(greenStream)
+    .where(<KeySelector>)
+    .equalTo(<KeySelector>)
+    .window(EventTimeSessionWindows.withGap(Time.milliseconds(1)))
+    .apply (new JoinFunction<Integer, Integer, String> (){
+        @Override
+        public String join(Integer first, Integer second) {
+            return first + "," + second;
+        }
+    });
+{% endhighlight %}
+</div>
+<div data-lang="scala" markdown="1">
+
+{% highlight scala %}
+import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+ 
+...
+
+val orangeStream: DataStream[Integer] = ...
+val greenStream: DataStream[Integer] = ...
+
+orangeStream.join(greenStream)
+    .where(elem => /* select key */)
+    .equalTo(elem => /* select key */)
+    .window(EventTimeSessionWindows.withGap(Time.milliseconds(1)))
+    .apply { (e1, e2) => e1 + "," + e2 }
+{% endhighlight %}
+
+</div>
+</div>
+
+# Interval Join
+The interval join joins elements of two streams (we'll call them A & B for now) with a common key and where elements of stream B have timestamps that lie in a relative time interval to timestamps of elements in stream A.
+
+This can also be expressed more formally as
+`b.timestamp ∈ [a.timestamp + lowerBound; a.timestamp + upperBound]` or 
+`a.timestamp + lowerBound <= b.timestamp <= a.timestamp + upperBound`
+
+where a and b are elements of A and B that share a common key. Both the lower and upper bound can be either negative or positive as long as the lower bound is always smaller than or equal to the upper bound. The interval join currently only performs inner joins.
+
+When a pair of elements is passed to the `ProcessJoinFunction`, it will be assigned the larger timestamp of the two elements (which can be accessed via the `ProcessJoinFunction.Context`).
+
+<span class="label label-info">Note</span> The interval join currently only supports event time.
+
+<img src="{{ site.baseurl }}/fig/interval-join.svg" class="center" style="width: 80%;" />
+
+In the example above, we join two streams 'orange' and 'green' with a lower bound of -2 milliseconds and an upper bound of +1 millisecond. By default, these boundaries are inclusive, but `.lowerBoundExclusive()` and `.upperBoundExclusive()` can be applied to change the behaviour.
+
+Using the more formal notation again this will translate to 
+
+`orangeElem.ts + lowerBound <= greenElem.ts <= orangeElem.ts + upperBound`
+
+as indicated by the triangles.
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+
+{% highlight java %}
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
+import org.apache.flink.streaming.api.windowing.time.Time;
+import org.apache.flink.util.Collector;
+
+...
+
+DataStream<Integer> orangeStream = ...
+DataStream<Integer> greenStream = ...
+
+orangeStream
+    .keyBy(<KeySelector>)
+    .intervalJoin(greenStream.keyBy(<KeySelector>))
+    .between(Time.milliseconds(-2), Time.milliseconds(1))
+    .process(new ProcessJoinFunction<Integer, Integer, String>() {
+
+        @Override
+        public void processElement(Integer left, Integer right, Context ctx, Collector<String> out) {
+            out.collect(left + "," + right);
+        }
+    });
+{% endhighlight %}
+
+</div>
+<div data-lang="scala" markdown="1">
+
+{% highlight scala %}
+import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
+import org.apache.flink.streaming.api.windowing.time.Time;
+import org.apache.flink.util.Collector;
+
+...
+
+val orangeStream: DataStream[Integer] = ...
+val greenStream: DataStream[Integer] = ...
+
+orangeStream
+    .keyBy(elem => /* select key */)
+    .intervalJoin(greenStream.keyBy(elem => /* select key */))
+    .between(Time.milliseconds(-2), Time.milliseconds(1))
+    .process(new ProcessJoinFunction[Integer, Integer, String] {
+        override def processElement(left: Integer, right: Integer, ctx: ProcessJoinFunction[Integer, Integer, String]#Context, out: Collector[String]): Unit = {
+            out.collect(left + "," + right)
+        }
+    })
+{% endhighlight %}
+
+</div>
+</div>
diff --git a/docs/dev/stream/operators/process_function.md b/docs/dev/stream/operators/process_function.md
index 4f36721adc7..b2a373eaa5f 100644
--- a/docs/dev/stream/operators/process_function.md
+++ b/docs/dev/stream/operators/process_function.md
@@ -277,19 +277,18 @@ Both types of timers (processing-time and event-time) are internally maintained
 
 The `TimerService` deduplicates timers per key and timestamp, i.e., there is at most one timer per key and timestamp. If multiple timers are registered for the same timestamp, the `onTimer()` method will be called just once.
 
-**Note:** Flink synchronizes invocations of `onTimer()` and `processElement()`. Hence, users do not have to worry about concurrent modification of state.
+<span class="label label-info">Note</span> Flink synchronizes invocations of `onTimer()` and `processElement()`. Hence, users do not have to worry about concurrent modification of state.
 
 ### Fault Tolerance
 
 Timers are fault tolerant and checkpointed along with the state of the application. 
 In case of a failure recovery or when starting an application from a savepoint, the timers are restored.
 
-**Note:** Checkpointed processing-time timers that were supposed to fire before their restoration, will fire immediately. 
+<span class="label label-info">Note</span> Checkpointed processing-time timers that were supposed to fire before their restoration, will fire immediately.
 This might happen when an application recovers from a failure or when it is started from a savepoint.
 
-**Note:** Timers are always synchronously checkpointed, regardless of the configuration of the state backends. 
-Therefore, a large number of timers can significantly increase checkpointing time. 
-See the "Timer Coalescing" section for advice on how to reduce the number of timers.
+<span class="label label-info">Note</span> Timers are always asynchronously checkpointed, except for the combination of RocksDB backend / with incremental snapshots / with heap-based timers (will be resolved with `FLINK-10026`).
+Notice that large numbers of timers can increase the checkpointing time because timers are part of the checkpointed state. See the "Timer Coalescing" section for advice on how to reduce the number of timers.
 
 ### Timer Coalescing
 
@@ -333,3 +332,43 @@ ctx.timerService.registerEventTimeTimer(coalescedTime)
 {% endhighlight %}
 </div>
 </div>
+
+Timers can also be stopped and removed as follows:
+
+Stopping a processing-time timer:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+long timestampOfTimerToStop = ...
+ctx.timerService().deleteProcessingTimeTimer(timestampOfTimerToStop);
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+val timestampOfTimerToStop = ...
+ctx.timerService.deleteProcessingTimeTimer(timestampOfTimerToStop)
+{% endhighlight %}
+</div>
+</div>
+
+Stopping an event-time timer:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+long timestampOfTimerToStop = ...
+ctx.timerService().deleteEventTimeTimer(timestampOfTimerToStop);
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+val timestampOfTimerToStop = ...
+ctx.timerService.deleteEventTimeTimer(timestampOfTimerToStop)
+{% endhighlight %}
+</div>
+</div>
+
+<span class="label label-info">Note</span> Stopping a timer has no effect if no such timer with the given timestamp is registered.
diff --git a/docs/dev/stream/operators/windows.md b/docs/dev/stream/operators/windows.md
index f657c80379d..bebc5dd260e 100644
--- a/docs/dev/stream/operators/windows.md
+++ b/docs/dev/stream/operators/windows.md
@@ -724,17 +724,19 @@ A `ProcessWindowFunction` can be defined and used like this:
 DataStream<Tuple2<String, Long>> input = ...;
 
 input
-    .keyBy(<key selector>)
-    .window(<window assigner>)
-    .process(new MyProcessWindowFunction());
+  .keyBy(t -> t.f0)
+  .timeWindow(Time.minutes(5))
+  .process(new MyProcessWindowFunction());
 
 /* ... */
 
-public class MyProcessWindowFunction extends ProcessWindowFunction<Tuple<String, Long>, String, String, TimeWindow> {
+public class MyProcessWindowFunction 
+    extends ProcessWindowFunction<Tuple2<String, Long>, String, String, TimeWindow> {
 
-  void process(String key, Context context, Iterable<Tuple<String, Long>> input, Collector<String> out) {
+  @Override
+  public void process(String key, Context context, Iterable<Tuple2<String, Long>> input, Collector<String> out) {
     long count = 0;
-    for (Tuple<String, Long> in: input) {
+    for (Tuple2<String, Long> in: input) {
       count++;
     }
     out.collect("Window: " + context.window() + "count: " + count);
@@ -749,9 +751,9 @@ public class MyProcessWindowFunction extends ProcessWindowFunction<Tuple<String,
 val input: DataStream[(String, Long)] = ...
 
 input
-    .keyBy(<key selector>)
-    .window(<window assigner>)
-    .process(new MyProcessWindowFunction())
+  .keyBy(_._1)
+  .timeWindow(Time.minutes(5))
+  .process(new MyProcessWindowFunction())
 
 /* ... */
 
@@ -969,7 +971,7 @@ private static class MyFoldFunction
 
   public Tuple3<String, Long, Integer> fold(Tuple3<String, Long, Integer> acc, SensorReading s) {
       Integer cur = acc.getField(2);
-      acc.setField(2, cur + 1);
+      acc.setField(cur + 1, 2);
       return acc;
   }
 }
diff --git a/docs/dev/stream/python.md b/docs/dev/stream/python.md
index 887d983c404..29b2f30be0a 100644
--- a/docs/dev/stream/python.md
+++ b/docs/dev/stream/python.md
@@ -227,7 +227,7 @@ Data transformations transform one or more DataStreams into a new DataStream. Pr
 multiple transformations into sophisticated assemblies.
 
 This section gives a brief overview of the available transformations. The [transformations
-documentation](dataset_transformations.html) has a full description of all transformations with
+documentation](./operators/index.html) has a full description of all transformations with
 examples.
 
 <br />
@@ -322,7 +322,7 @@ data.reduce(Sum())
       <td>
         <p>Windows can be defined on already partitioned KeyedStreams. Windows group the data in each
         key according to some characteristic (e.g., the data that arrived within the last 5 seconds).
-        See <a href="windows.html">windows</a> for a complete description of windows.
+        See <a href="./operators/windows.html">windows</a> for a complete description of windows.
     {% highlight python %}
 keyed_stream.count_window(10, 5)  # Last 10 elements, sliding (jumping) by 5 elements
 
@@ -624,7 +624,7 @@ env.execute()
 
 A system-wide default parallelism for all execution environments can be defined by setting the
 `parallelism.default` property in `./conf/flink-conf.yaml`. See the
-[Configuration]({{ site.baseurl }}/setup/config.html) documentation for details.
+[Configuration]({{ site.baseurl }}/ops/config.html) documentation for details.
 
 {% top %}
 
diff --git a/docs/dev/stream/state/state.md b/docs/dev/stream/state/state.md
index 44a3653a61f..decf1dbe9de 100644
--- a/docs/dev/stream/state/state.md
+++ b/docs/dev/stream/state/state.md
@@ -266,6 +266,136 @@ a `ValueState`. Once the count reaches 2 it will emit the average and clear the
 we start over from `0`. Note that this would keep a different state value for each different input
 key if we had tuples with different values in the first field.
 
+### State Time-To-Live (TTL)
+
+A *time-to-live* (TTL) can be assigned to the keyed state of any type. If a TTL is configured and a
+state value has expired, the stored value will be cleaned up on a best-effort basis, which is
+discussed in more detail below.
+
+All state collection types support per-entry TTLs. This means that list elements and map entries
+expire independently.
+
+In order to use state TTL one must first build a `StateTtlConfig` configuration object. The TTL 
+functionality can then be enabled in any state descriptor by passing the configuration:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+import org.apache.flink.api.common.state.StateTtlConfig;
+import org.apache.flink.api.common.state.ValueStateDescriptor;
+import org.apache.flink.api.common.time.Time;
+
+StateTtlConfig ttlConfig = StateTtlConfig
+    .newBuilder(Time.seconds(1))
+    .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
+    .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
+    .build();
+    
+ValueStateDescriptor<String> stateDescriptor = new ValueStateDescriptor<>("text state", String.class);
+stateDescriptor.enableTimeToLive(ttlConfig);
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+import org.apache.flink.api.common.state.StateTtlConfig
+import org.apache.flink.api.common.state.ValueStateDescriptor
+import org.apache.flink.api.common.time.Time
+
+val ttlConfig = StateTtlConfig
+    .newBuilder(Time.seconds(1))
+    .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
+    .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
+    .build
+    
+val stateDescriptor = new ValueStateDescriptor[String]("text state", classOf[String])
+stateDescriptor.enableTimeToLive(ttlConfig)
+{% endhighlight %}
+</div>
+</div>
+
+The configuration has several options to consider:
+
+The first parameter of the `newBuilder` method is mandatory; it is the time-to-live value.
+
+The update type configures when the state TTL is refreshed (by default `OnCreateAndWrite`):
+
+ - `StateTtlConfig.UpdateType.OnCreateAndWrite` - only on creation and write access
+ - `StateTtlConfig.UpdateType.OnReadAndWrite` - also on read access
+ 
+The state visibility configures whether the expired value is returned on read access 
+if it is not cleaned up yet (by default `NeverReturnExpired`):
+
+ - `StateTtlConfig.StateVisibility.NeverReturnExpired` - expired value is never returned
+ - `StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp` - returned if still available
+ 
+In case of `NeverReturnExpired`, the expired state behaves as if it does not exist anymore,
+even if it still has to be removed. This option can be useful for use cases where data must
+become unavailable for read access strictly after the TTL has passed, e.g., applications
+working with privacy-sensitive data.
+ 
+The other option, `ReturnExpiredIfNotCleanedUp`, allows returning the expired state until it has been cleaned up.
+
+**Notes:** 
+
+- The state backends store the timestamp of the last modification along with the user value,
+which means that enabling this feature increases the consumption of state storage.
+The heap state backend stores an additional Java object with a reference to the user state object
+and a primitive long value in memory. The RocksDB state backend adds 8 bytes per stored value, list entry, or map entry.
+
+- Only TTLs in reference to *processing time* are currently supported.
+
+- Trying to restore state that was previously configured without TTL using a TTL-enabled descriptor, or vice versa,
+will lead to a compatibility failure and a `StateMigrationException`.
+
+- The TTL configuration is not part of checkpoints or savepoints but rather a way of how Flink treats it in the currently running job.
+
+- Map state with TTL currently supports null user values only if the user value serializer can handle null values.
+If the serializer does not support null values, it can be wrapped with `NullableSerializer` at the cost of an extra byte in the serialized form.
+
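+The TTL-enabled descriptor is then used like any other state descriptor. As a minimal sketch (with an illustrative function and state name, applied on a keyed stream), the state can be accessed from a `RichFlatMapFunction` as follows; reading the value also cleans it up if it has expired:
+
+{% highlight java %}
+import org.apache.flink.api.common.functions.RichFlatMapFunction;
+import org.apache.flink.api.common.state.StateTtlConfig;
+import org.apache.flink.api.common.state.ValueState;
+import org.apache.flink.api.common.state.ValueStateDescriptor;
+import org.apache.flink.api.common.time.Time;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.util.Collector;
+
+public class LastSeenFunction extends RichFlatMapFunction<String, String> {
+
+    private transient ValueState<String> lastSeen;
+
+    @Override
+    public void open(Configuration parameters) {
+        StateTtlConfig ttlConfig = StateTtlConfig
+            .newBuilder(Time.hours(1))
+            .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
+            .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
+            .build();
+
+        ValueStateDescriptor<String> descriptor =
+            new ValueStateDescriptor<>("last-seen", String.class);
+        descriptor.enableTimeToLive(ttlConfig);
+
+        lastSeen = getRuntimeContext().getState(descriptor);
+    }
+
+    @Override
+    public void flatMap(String value, Collector<String> out) throws Exception {
+        // values older than one hour have expired and are returned as null
+        String previous = lastSeen.value();
+        if (previous != null) {
+            out.collect(previous);
+        }
+        lastSeen.update(value);
+    }
+}
+{% endhighlight %}
+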
+#### Cleanup of Expired State
+
+Currently, expired values are only removed when they are read out explicitly,
+e.g., by calling `ValueState.value()`.
+
+<span class="label label-danger">Attention</span> This means that, by default, expired state that is never read
+will not be removed, possibly leading to ever-growing state. This might change in future releases.
+
+Additionally, cleanup can be activated at the moment a full state snapshot is taken, which reduces
+the size of the snapshot. Under the current implementation the local state is not cleaned up,
+but the expired state will not be included when restoring from the previous snapshot.
+This can be configured in `StateTtlConfig`:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+import org.apache.flink.api.common.state.StateTtlConfig;
+import org.apache.flink.api.common.time.Time;
+
+StateTtlConfig ttlConfig = StateTtlConfig
+    .newBuilder(Time.seconds(1))
+    .cleanupFullSnapshot()
+    .build();
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+import org.apache.flink.api.common.state.StateTtlConfig
+import org.apache.flink.api.common.time.Time
+
+val ttlConfig = StateTtlConfig
+    .newBuilder(Time.seconds(1))
+    .cleanupFullSnapshot
+    .build
+{% endhighlight %}
+</div>
+</div>
+
+This option is not applicable to incremental checkpointing in the RocksDB state backend.
+
+More strategies will be added in the future for cleaning up expired state automatically in the background.
+
 ### State in the Scala DataStream API
 
 In addition to the interface described above, the Scala API has shortcuts for stateful
diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
new file mode 100644
index 00000000000..7a9e141083e
--- /dev/null
+++ b/docs/dev/table/connect.md
@@ -0,0 +1,1049 @@
+---
+title: "Connect to External Systems"
+nav-parent_id: tableapi
+nav-pos: 19
+---
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+Flink's Table API & SQL programs can be connected to other external systems for reading and writing both batch and streaming tables. A table source provides access to data which is stored in external systems (such as a database, key-value store, message queue, or file system). A table sink emits a table to an external storage system. Depending on the type of source and sink, they support different formats such as CSV, Parquet, or ORC.
+
+This page describes how to declare built-in table sources and/or table sinks and register them in Flink. After a source or sink has been registered, it can be accessed by Table API & SQL statements.
+
+<span class="label label-danger">Attention</span> If you want to implement your own *custom* table source or sink, have a look at the [user-defined sources & sinks page](sourceSinks.html).
+
+* This will be replaced by the TOC
+{:toc}
+
+Dependencies
+------------
+
+The following tables list all available connectors and formats. Their mutual compatibility is tagged in the corresponding sections for [table connectors](connect.html#table-connectors) and [table formats](connect.html#table-formats). The tables also provide dependency information for projects using a build automation tool (such as Maven or SBT) as well as for the SQL Client with SQL JAR bundles.
+
+{% if site.is_stable %}
+
+### Connectors
+
+| Name              | Version       | Maven dependency             | SQL Client JAR         |
+| :---------------- | :------------ | :--------------------------- | :----------------------|
+| Filesystem        |               | Built-in                     | Built-in               |
+| Apache Kafka      | 0.8           | `flink-connector-kafka-0.8`  | Not available          |
+| Apache Kafka      | 0.9           | `flink-connector-kafka-0.9`  | [Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.9{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.9{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar) |
+| Apache Kafka      | 0.10          | `flink-connector-kafka-0.10` | [Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.10{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.10{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar) |
+| Apache Kafka      | 0.11          | `flink-connector-kafka-0.11` | [Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.11{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.11{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar) |
+
+### Formats
+
+| Name              | Maven dependency             | SQL Client JAR         |
+| :---------------- | :--------------------------- | :--------------------- |
+| CSV               | Built-in                     | Built-in               |
+| JSON              | `flink-json`                 | [Download](http://central.maven.org/maven2/org/apache/flink/flink-json/{{site.version}}/flink-json-{{site.version}}-sql-jar.jar) |
+| Apache Avro       | `flink-avro`                 | [Download](http://central.maven.org/maven2/org/apache/flink/flink-avro/{{site.version}}/flink-avro-{{site.version}}-sql-jar.jar) |
+
+{% else %}
+
+These tables are only available for stable releases.
+
+{% endif %}
+
+{% top %}
+
+Overview
+--------
+
+Beginning with Flink 1.6, the declaration of a connection to an external system is separated from the actual implementation.
+
+Connections can be specified either
+
+- **programmatically** using a `Descriptor` under `org.apache.flink.table.descriptors` for Table & SQL API
+- or **declaratively** via [YAML configuration files](http://yaml.org/) for the SQL Client.
+
+This allows not only for a better unification of APIs and the SQL Client but also for better extensibility through [custom implementations](sourceSinks.html) without changing the actual declaration.
+
+Every declaration is similar to a SQL `CREATE TABLE` statement. One can define the name of the table, the schema of the table, a connector, and a data format upfront for connecting to an external system.
+
+The **connector** describes the external system that stores the data of a table. Storage systems such as [Apache Kafka](http://kafka.apache.org/) or a regular file system can be declared here. The connector might already provide a fixed format with fields and schema.
+
+Some systems support different **data formats**. For example, a table that is stored in Kafka or in files can encode its rows with CSV, JSON, or Avro. A database connector might need the table schema here. Whether or not a storage system requires the definition of a format is documented for every [connector](connect.html#table-connectors). Different systems also require different [types of formats](connect.html#table-formats) (e.g., column-oriented formats vs. row-oriented formats). The documentation states which format types and connectors are compatible.
+
+The **table schema** defines the schema of a table that is exposed to SQL queries. It describes how a source maps the data format to the table schema, and how a sink maps the table schema back to the data format. The schema has access to fields defined by the connector or format. It can use one or more fields for extracting or inserting [time attributes](streaming.html#time-attributes). If the input fields have no deterministic field order, the schema clearly defines column names, their order, and their origin.
+
+The subsequent sections will cover each definition part ([connector](connect.html#table-connectors), [format](connect.html#table-formats), and [schema](connect.html#table-schema)) in more detail. The following example shows how to pass them:
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+tableEnvironment
+  .connect(...)
+  .withFormat(...)
+  .withSchema(...)
+  .inAppendMode()
+  .registerTableSource("MyTable")
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+name: MyTable
+type: source
+update-mode: append
+connector: ...
+format: ...
+schema: ...
+{% endhighlight %}
+</div>
+</div>
+
+The table's type (`source`, `sink`, or `both`) determines how a table is registered. In case of table type `both`, both a table source and a table sink are registered under the same name. Logically, this means that we can both read from and write to such a table, similar to a table in a regular DBMS.
+
+For streaming queries, an [update mode](connect.html#update-mode) declares how to communicate between a dynamic table and the storage system for continuous queries.
+
+The following code shows a full example of how to connect to Kafka for reading Avro records.
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+tableEnvironment
+  // declare the external system to connect to
+  .connect(
+    new Kafka()
+      .version("0.10")
+      .topic("test-input")
+      .startFromEarliest()
+      .property("zookeeper.connect", "localhost:2181")
+      .property("bootstrap.servers", "localhost:9092")
+  )
+
+  // declare a format for this system
+  .withFormat(
+    new Avro()
+      .avroSchema(
+        "{" +
+        "  \"namespace\": \"org.myorganization\"," +
+        "  \"type\": \"record\"," +
+        "  \"name\": \"UserMessage\"," +
+        "    \"fields\": [" +
+        "      {\"name\": \"timestamp\", \"type\": \"string\"}," +
+        "      {\"name\": \"user\", \"type\": \"long\"}," +
+        "      {\"name\": \"message\", \"type\": [\"string\", \"null\"]}" +
+        "    ]" +
+        "}" +
+      )
+  )
+
+  // declare the schema of the table
+  .withSchema(
+    new Schema()
+      .field("rowtime", Types.SQL_TIMESTAMP)
+        .rowtime(new Rowtime()
+          .timestampsFromField("ts")
+          .watermarksPeriodicBounded(60000)
+        )
+      .field("user", Types.LONG)
+      .field("message", Types.STRING)
+  )
+
+  // specify the update-mode for streaming tables
+  .inAppendMode()
+
+  // register as source, sink, or both and under a name
+  .registerTableSource("MyUserTable");
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+tables:
+  - name: MyUserTable      # name the new table
+    type: source           # declare if the table should be "source", "sink", or "both"
+    update-mode: append    # specify the update-mode for streaming tables
+
+    # declare the external system to connect to
+    connector:
+      type: kafka
+      version: "0.10"
+      topic: test-input
+      startup-mode: earliest-offset
+      properties:
+        - key: zookeeper.connect
+          value: localhost:2181
+        - key: bootstrap.servers
+          value: localhost:9092
+
+    # declare a format for this system
+    format:
+      type: avro
+      avro-schema: >
+        {
+          "namespace": "org.myorganization",
+          "type": "record",
+          "name": "UserMessage",
+            "fields": [
+              {"name": "ts", "type": "string"},
+              {"name": "user", "type": "long"},
+              {"name": "message", "type": ["string", "null"]}
+            ]
+        }
+
+    # declare the schema of the table
+    schema:
+      - name: rowtime
+        type: TIMESTAMP
+        rowtime:
+          timestamps:
+            type: from-field
+            from: ts
+          watermarks:
+            type: periodic-bounded
+            delay: "60000"
+      - name: user
+        type: BIGINT
+      - name: message
+        type: VARCHAR
+{% endhighlight %}
+</div>
+</div>
+
+In both cases, the desired connection properties are converted into normalized, string-based key-value pairs. So-called [table factories](sourceSinks.html#define-a-tablefactory) create configured table sources, table sinks, and corresponding formats from the key-value pairs. All table factories that can be found via Java's [Service Provider Interfaces (SPI)](https://docs.oracle.com/javase/tutorial/sound/SPI-intro.html) are taken into account when searching for exactly one matching table factory.
+
+If no factory can be found or multiple factories match for the given properties, an exception will be thrown with additional information about considered factories and supported properties.
+
+{% top %}
+
+Table Schema
+------------
+
+The table schema defines the names and types of columns similar to the column definitions of a SQL `CREATE TABLE` statement. In addition, one can specify how columns are mapped from and to fields of the format in which the table data is encoded. The origin of a field might be important if the name of the column should differ from the field name in the input/output format. For instance, a column `user_name` might reference the field `$$-user-name` of a JSON format. Additionally, the schema is needed to map types from an external system to Flink's representation. In case of a table sink, it ensures that only data with a valid schema is written to an external system.
+
+The following example shows a simple schema without time attributes and one-to-one field mapping of input/output to table columns.
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.withSchema(
+  new Schema()
+    .field("MyField1", Types.INT)     // required: specify the fields of the table (in this order)
+    .field("MyField2", Types.STRING)
+    .field("MyField3", Types.BOOLEAN)
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+schema:
+  - name: MyField1    # required: specify the fields of the table (in this order)
+    type: INT
+  - name: MyField2
+    type: VARCHAR
+  - name: MyField3
+    type: BOOLEAN
+{% endhighlight %}
+</div>
+</div>
+
+For *each field*, the following properties can be declared in addition to the column's name and type:
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.withSchema(
+  new Schema()
+    .field("MyField1", Types.SQL_TIMESTAMP)
+      .proctime()      // optional: declares this field as a processing-time attribute
+    .field("MyField2", Types.SQL_TIMESTAMP)
+      .rowtime(...)    // optional: declares this field as an event-time attribute
+    .field("MyField3", Types.BOOLEAN)
+      .from("mf3")     // optional: original field in the input that is referenced/aliased by this field
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+schema:
+  - name: MyField1
+    type: TIMESTAMP
+    proctime: true    # optional: boolean flag whether this field should be a processing-time attribute
+  - name: MyField2
+    type: TIMESTAMP
+    rowtime: ...      # optional: whether this field should be an event-time attribute
+  - name: MyField3
+    type: BOOLEAN
+    from: mf3         # optional: original field in the input that is referenced/aliased by this field
+{% endhighlight %}
+</div>
+</div>
+
+Time attributes are essential when working with unbounded streaming tables. Therefore both processing-time and event-time (also known as "rowtime") attributes can be defined as part of the schema.
+
+For more information about time handling in Flink and especially event-time, we recommend the general [event-time section](streaming.html#time-attributes).
+
+### Rowtime Attributes
+
+In order to control the event-time behavior for tables, Flink provides predefined timestamp extractors and watermark strategies.
+
+The following timestamp extractors are supported:
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+// Converts an existing LONG or SQL_TIMESTAMP field in the input into the rowtime attribute.
+.rowtime(
+  new Rowtime()
+    .timestampsFromField("ts_field")    // required: original field name in the input
+)
+
+// Converts the assigned timestamps from a DataStream API record into the rowtime attribute 
+// and thus preserves the assigned timestamps from the source.
+// This requires a source that assigns timestamps (e.g., Kafka 0.10+).
+.rowtime(
+  new Rowtime()
+    .timestampsFromSource()
+)
+
+// Sets a custom timestamp extractor to be used for the rowtime attribute.
+// The extractor must extend `org.apache.flink.table.sources.tsextractors.TimestampExtractor`.
+.rowtime(
+  new Rowtime()
+    .timestampsFromExtractor(...)
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+# Converts an existing BIGINT or TIMESTAMP field in the input into the rowtime attribute.
+rowtime:
+  timestamps:
+    type: from-field
+    from: "ts_field"                 # required: original field name in the input
+
+# Converts the assigned timestamps from a DataStream API record into the rowtime attribute 
+# and thus preserves the assigned timestamps from the source.
+rowtime:
+  timestamps:
+    type: from-source
+{% endhighlight %}
+</div>
+</div>
+
+The following watermark strategies are supported:
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+// Sets a watermark strategy for ascending rowtime attributes. Emits a watermark of the maximum 
+// observed timestamp so far minus 1. Rows that have a timestamp equal to the max timestamp
+// are not late.
+.rowtime(
+  new Rowtime()
+    .watermarksPeriodicAscending()
+)
+
+// Sets a built-in watermark strategy for rowtime attributes which are out-of-order by a bounded time interval.
+// Emits watermarks which are the maximum observed timestamp minus the specified delay.
+.rowtime(
+  new Rowtime()
+    .watermarksPeriodicBounded(2000)    // delay in milliseconds
+)
+
+// Sets a built-in watermark strategy which indicates the watermarks should be preserved from the
+// underlying DataStream API and thus preserves the assigned watermarks from the source.
+.rowtime(
+  new Rowtime()
+    .watermarksFromSource()
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+# Sets a watermark strategy for ascending rowtime attributes. Emits a watermark of the maximum 
+# observed timestamp so far minus 1. Rows that have a timestamp equal to the max timestamp
+# are not late.
+rowtime:
+  watermarks:
+    type: periodic-ascending
+
+# Sets a built-in watermark strategy for rowtime attributes which are out-of-order by a bounded time interval.
+# Emits watermarks which are the maximum observed timestamp minus the specified delay.
+rowtime:
+  watermarks:
+    type: periodic-bounded
+    delay: ...                # required: delay in milliseconds
+
+# Sets a built-in watermark strategy which indicates the watermarks should be preserved from the
+# underlying DataStream API and thus preserves the assigned watermarks from the source.
+rowtime:
+  watermarks:
+    type: from-source
+{% endhighlight %}
+</div>
+</div>
+
+Make sure to always declare both timestamps and watermarks. Watermarks are required for triggering time-based operations.
+
+### Type Strings
+
+Because type information is only available in a programming language, the following type strings are supported for defining types in a YAML file:
+
+{% highlight yaml %}
+VARCHAR
+BOOLEAN
+TINYINT
+SMALLINT
+INT
+BIGINT
+FLOAT
+DOUBLE
+DECIMAL
+DATE
+TIME
+TIMESTAMP
+ROW(fieldtype, ...)              # unnamed row; e.g. ROW(VARCHAR, INT) that is mapped to Flink's RowTypeInfo
+                                 # with indexed fields names f0, f1, ...
+ROW(fieldname fieldtype, ...)    # named row; e.g., ROW(myField VARCHAR, myOtherField INT) that
+                                 # is mapped to Flink's RowTypeInfo
+POJO(class)                      # e.g., POJO(org.mycompany.MyPojoClass) that is mapped to Flink's PojoTypeInfo
+ANY(class)                       # e.g., ANY(org.mycompany.MyClass) that is mapped to Flink's GenericTypeInfo
+ANY(class, serialized)           # used for type information that is not supported by Flink's Table & SQL API
+{% endhighlight %}
+
+{% top %}
+
+Update Modes
+------------
+
+For streaming queries, it is required to declare how to perform the [conversion between a dynamic table and an external connector](streaming.html#dynamic-tables--continuous-queries). The *update mode* specifies which kind of messages should be exchanged with the external system:
+
+**Append Mode:** In append mode, a dynamic table and an external connector only exchange INSERT messages.
+
+**Retract Mode:** In retract mode, a dynamic table and an external connector exchange ADD and RETRACT messages. An INSERT change is encoded as an ADD message, a DELETE change as a RETRACT message, and an UPDATE change as a RETRACT message for the updated (previous) row and an ADD message for the updating (new) row. In contrast to upsert mode, no key must be defined in this mode. However, every update consists of two messages, which is less efficient.
+
+**Upsert Mode:** In upsert mode, a dynamic table and an external connector exchange UPSERT and DELETE messages. This mode requires a (possibly composite) unique key by which updates can be propagated. The external connector needs to be aware of the unique key attribute in order to apply messages correctly. INSERT and UPDATE changes are encoded as UPSERT messages and DELETE changes as DELETE messages. The main difference to a retract stream is that UPDATE changes are encoded with a single message and are therefore more efficient.
+
+<span class="label label-danger">Attention</span> The documentation of each connector states which update modes are supported.
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.connect(...)
+  .inAppendMode()    // otherwise: inUpsertMode() or inRetractMode()
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+tables:
+  - name: ...
+    update-mode: append    # otherwise: "retract" or "upsert"
+{% endhighlight %}
+</div>
+</div>
+
+See also the [general streaming concepts documentation](streaming.html#dynamic-tables--continuous-queries) for more information.
+
+{% top %}
+
+Table Connectors
+----------------
+
+Flink provides a set of connectors for connecting to external systems.
+
+Please note that not all connectors are available in both batch and streaming yet. Furthermore, not every streaming connector supports every streaming mode. Therefore, each connector is tagged accordingly. A format tag indicates that the connector requires a certain type of format.
+
+### File System Connector
+
+<span class="label label-primary">Source: Batch</span>
+<span class="label label-primary">Source: Streaming Append Mode</span>
+<span class="label label-primary">Sink: Batch</span>
+<span class="label label-primary">Sink: Streaming Append Mode</span>
+<span class="label label-info">Format: CSV-only</span>
+
+The file system connector allows for reading from and writing to a local or distributed filesystem. A filesystem can be defined as:
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.connect(
+  new FileSystem()
+    .path("file:///path/to/whatever")    // required: path to a file or directory
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+connector:
+  type: filesystem
+  path: "file:///path/to/whatever"    # required: path to a file or directory
+{% endhighlight %}
+</div>
+</div>
+
+The file system connector itself is included in Flink and does not require an additional dependency. A corresponding format needs to be specified for reading and writing rows from and to a file system.
+
+<span class="label label-danger">Attention</span> Make sure to include [Flink File System specific dependencies]({{ site.baseurl }}/ops/filesystems.html).
+
+<span class="label label-danger">Attention</span> File system sources and sinks for streaming are only experimental. In the future, we will support actual streaming use cases, i.e., directory monitoring and bucket output.
+
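+As a sketch that combines the pieces above (path, format fields, and schema are illustrative; the `Csv` format descriptor is described in the [table formats](connect.html#table-formats) section), a CSV-backed table could be registered as follows:
+
+{% highlight java %}
+tableEnvironment
+  .connect(
+    new FileSystem()
+      .path("file:///path/to/input.csv")
+  )
+  .withFormat(
+    new Csv()
+      .field("word", Types.STRING)
+      .field("cnt", Types.LONG)
+  )
+  .withSchema(
+    new Schema()
+      .field("word", Types.STRING)
+      .field("cnt", Types.LONG)
+  )
+  .registerTableSource("WordCounts");
+{% endhighlight %}
+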
+### Kafka Connector
+
+<span class="label label-primary">Source: Streaming Append Mode</span>
+<span class="label label-primary">Sink: Streaming Append Mode</span>
+<span class="label label-info">Format: Serialization Schema</span>
+<span class="label label-info">Format: Deserialization Schema</span>
+
+The Kafka connector allows for reading from and writing to an Apache Kafka topic. It can be defined as follows:
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.connect(
+  new Kafka()
+    .version("0.11")    // required: valid connector versions are "0.8", "0.9", "0.10", and "0.11"
+    .topic("...")       // required: topic name from which the table is read
+
+    // optional: connector specific properties
+    .property("zookeeper.connect", "localhost:2181")
+    .property("bootstrap.servers", "localhost:9092")
+    .property("group.id", "testGroup")
+
+    // optional: select a startup mode for Kafka offsets
+    .startFromEarliest()
+    .startFromLatest()
+    .startFromSpecificOffsets(...)
+
+    // optional: output partitioning from Flink's partitions into Kafka's partitions
+    .sinkPartitionerFixed()         // each Flink partition ends up in at most one Kafka partition (default)
+    .sinkPartitionerRoundRobin()    // a Flink partition is distributed to Kafka partitions round-robin
+    .sinkPartitionerCustom(MyCustom.class)    // use a custom FlinkKafkaPartitioner subclass
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+connector:
+  type: kafka
+  version: "0.11"     # required: valid connector versions are "0.8", "0.9", "0.10", and "0.11"
+  topic: ...          # required: topic name from which the table is read
+
+  properties:         # optional: connector specific properties
+    - key: zookeeper.connect
+      value: localhost:2181
+    - key: bootstrap.servers
+      value: localhost:9092
+    - key: group.id
+      value: testGroup
+
+  startup-mode: ...   # optional: valid modes are "earliest-offset", "latest-offset",
+                      # "group-offsets", or "specific-offsets"
+  specific-offsets:   # optional: used in case of startup mode with specific offsets
+    - partition: 0
+      offset: 42
+    - partition: 1
+      offset: 300
+
+  sink-partitioner: ...    # optional: output partitioning from Flink's partitions into Kafka's partitions
+                           # valid are "fixed" (each Flink partition ends up in at most one Kafka partition),
+                           # "round-robin" (a Flink partition is distributed to Kafka partitions round-robin)
+                           # "custom" (use a custom FlinkKafkaPartitioner subclass)
+  sink-partitioner-class: org.mycompany.MyPartitioner  # optional: used in case of sink partitioner custom
+{% endhighlight %}
+</div>
+</div>
+
+**Specify the start reading position:** By default, the Kafka source will start reading data from the committed group offsets in Zookeeper or Kafka brokers. You can specify other start positions, which correspond to the configurations in section [Kafka Consumers Start Position Configuration]({{ site.baseurl }}/dev/connectors/kafka.html#kafka-consumers-start-position-configuration).
+
+**Flink-Kafka Sink Partitioning:** By default, a Kafka sink writes to at most as many partitions as its own parallelism (each parallel instance of the sink writes to exactly one partition). In order to distribute the writes to more partitions or control the routing of rows into partitions, a custom sink partitioner can be provided. The round-robin partitioner is useful to avoid an unbalanced partitioning. However, it will cause a lot of network connections between all the Flink instances and all the Kafka brokers.
+
+**Consistency guarantees:** By default, a Kafka sink ingests data with at-least-once guarantees into a Kafka topic if the query is executed with [checkpointing enabled]({{ site.baseurl }}/dev/stream/state/checkpointing.html#enabling-and-configuring-checkpointing).
+
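+For example, the following sketch (the environment variable names and the checkpoint interval are illustrative) enables checkpointing before the Kafka-backed table is registered:
+
+{% highlight java %}
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.table.api.TableEnvironment;
+import org.apache.flink.table.api.java.StreamTableEnvironment;
+
+StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+// enable checkpointing every 5 seconds to get at-least-once guarantees for the Kafka sink
+env.enableCheckpointing(5000);
+
+StreamTableEnvironment tableEnvironment = TableEnvironment.getTableEnvironment(env);
+{% endhighlight %}
+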
+**Kafka 0.10+ Timestamps:** Since Kafka 0.10, Kafka messages have a timestamp as metadata that specifies when the record was written into the Kafka topic. These timestamps can be used for a [rowtime attribute](connect.html#defining-the-schema) by selecting `timestamps: from-source` in YAML and `timestampsFromSource()` in Java/Scala respectively. 
+
+Make sure to add the version-specific Kafka dependency. In addition, a corresponding format needs to be specified for reading and writing rows from and to Kafka.
+
+{% top %}
+
+Table Formats
+-------------
+
+Flink provides a set of table formats that can be used with table connectors.
+
+A format tag indicates the format type for matching with a connector.
+
+### CSV Format
+
+The CSV format allows for reading and writing comma-separated rows.
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.withFormat(
+  new Csv()
+    .field("field1", Types.STRING)    // required: ordered format fields
+    .field("field2", Types.TIMESTAMP)
+    .fieldDelimiter(",")              // optional: string delimiter "," by default 
+    .lineDelimiter("\n")              // optional: string delimiter "\n" by default 
+    .quoteCharacter('"')              // optional: single character for string values, empty by default
+    .commentPrefix('#')               // optional: string to indicate comments, empty by default
+    .ignoreFirstLine()                // optional: ignore the first line, by default it is not skipped
+    .ignoreParseErrors()              // optional: skip records with parse error instead of failing by default
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+format:
+  type: csv
+  fields:                    # required: ordered format fields
+    - name: field1
+      type: VARCHAR
+    - name: field2
+      type: TIMESTAMP
+  field-delimiter: ","       # optional: string delimiter "," by default 
+  line-delimiter: "\n"       # optional: string delimiter "\n" by default 
+  quote-character: '"'       # optional: single character for string values, empty by default
+  comment-prefix: '#'        # optional: string to indicate comments, empty by default
+  ignore-first-line: false   # optional: boolean flag to ignore the first line, by default it is not skipped
+  ignore-parse-errors: true  # optional: skip records with parse error instead of failing by default
+{% endhighlight %}
+</div>
+</div>
+
+The CSV format is included in Flink and does not require additional dependencies.
+
+<span class="label label-danger">Attention</span> The CSV format for writing rows is limited at the moment. Only a custom field delimiter is supported as optional parameter.
+
+### JSON Format
+
+<span class="label label-info">Format: Serialization Schema</span>
+<span class="label label-info">Format: Deserialization Schema</span>
+
+The JSON format allows for reading and writing JSON data that corresponds to a given format schema. The format schema can be defined either as a Flink type, as a JSON schema, or derived from the desired table schema. A Flink type enables a more SQL-like definition and mapping to the corresponding SQL data types. The JSON schema allows for more complex and nested structures.
+
+If the format schema is equal to the table schema, the schema can also be automatically derived. This allows for defining schema information only once. The names, types, and field order of the format are determined by the table's schema. Time attributes are ignored if their origin is not a field. A `from` definition in the table schema is interpreted as a field renaming in the format.
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.withFormat(
+  new Json()
+    .failOnMissingField(true)   // optional: flag whether to fail if a field is missing or not, false by default
+
+    // required: define the schema either by using type information which parses numbers to corresponding types
+    .schema(Type.ROW(...))
+
+    // or by using a JSON schema which parses to DECIMAL and TIMESTAMP
+    .jsonSchema(
+      "{" +
+      "  type: 'object'," +
+      "  properties: {" +
+      "    lon: {" +
+      "      type: 'number'" +
+      "    }," +
+      "    rideTime: {" +
+      "      type: 'string'," +
+      "      format: 'date-time'" +
+      "    }" +
+      "  }" +
+      "}"
+    )
+
+    // or use the table's schema
+    .deriveSchema()
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+format:
+  type: json
+  fail-on-missing-field: true   # optional: flag whether to fail if a field is missing or not, false by default
+
+  # required: define the schema either by using a type string which parses numbers to corresponding types
+  schema: "ROW(lon FLOAT, rideTime TIMESTAMP)"
+
+  # or by using a JSON schema which parses to DECIMAL and TIMESTAMP
+  json-schema: >
+    {
+      type: 'object',
+      properties: {
+        lon: {
+          type: 'number'
+        },
+        rideTime: {
+          type: 'string',
+          format: 'date-time'
+        }
+      }
+    }
+
+  # or use the table's schema
+  derive-schema: true
+{% endhighlight %}
+</div>
+</div>
+
+The following table shows the mapping of JSON schema types to Flink SQL types:
+
+| JSON schema                       | Flink SQL               |
+| :-------------------------------- | :---------------------- |
+| `object`                          | `ROW`                   |
+| `boolean`                         | `BOOLEAN`               |
+| `array`                           | `ARRAY[_]`              |
+| `number`                          | `DECIMAL`               |
+| `integer`                         | `DECIMAL`               |
+| `string`                          | `VARCHAR`               |
+| `string` with `format: date-time` | `TIMESTAMP`             |
+| `string` with `format: date`      | `DATE`                  |
+| `string` with `format: time`      | `TIME`                  |
+| `string` with `encoding: base64`  | `ARRAY[TINYINT]`        |
+| `null`                            | `NULL` (unsupported yet)|
+
+
+Currently, Flink supports only a subset of the [JSON schema specification](http://json-schema.org/) `draft-07`. Union types (as well as `allOf`, `anyOf`, `not`) are not supported yet. `oneOf` and arrays of types are only supported for specifying nullability.
+
+Simple references that link to a common definition in the document are supported as shown in the more complex example below:
+
+{% highlight json %}
+{
+  "definitions": {
+    "address": {
+      "type": "object",
+      "properties": {
+        "street_address": {
+          "type": "string"
+        },
+        "city": {
+          "type": "string"
+        },
+        "state": {
+          "type": "string"
+        }
+      },
+      "required": [
+        "street_address",
+        "city",
+        "state"
+      ]
+    }
+  },
+  "type": "object",
+  "properties": {
+    "billing_address": {
+      "$ref": "#/definitions/address"
+    },
+    "shipping_address": {
+      "$ref": "#/definitions/address"
+    },
+    "optional_address": {
+      "oneOf": [
+        {
+          "type": "null"
+        },
+        {
+          "$ref": "#/definitions/address"
+        }
+      ]
+    }
+  }
+}
+{% endhighlight %}
+
+**Missing Field Handling:** By default, a missing JSON field is set to `null`. You can enable strict JSON parsing that will cancel the source (and query) if a field is missing.
+
+Make sure to add the JSON format as a dependency.
+
+
+### Apache Avro Format
+
+<span class="label label-info">Format: Serialization Schema</span>
+<span class="label label-info">Format: Deserialization Schema</span>
+
+The [Apache Avro](https://avro.apache.org/) format allows for reading and writing Avro data that corresponds to a given format schema. The format schema can be defined either as a fully qualified class name of an Avro specific record or as an Avro schema string. If a class name is used, the class must be available in the classpath during runtime.
+
+<div class="codetabs" markdown="1">
+<div data-lang="Java/Scala" markdown="1">
+{% highlight java %}
+.withFormat(
+  new Avro()
+
+    // required: define the schema either by using an Avro specific record class
+    .recordClass(User.class)
+
+    // or by using an Avro schema
+    .avroSchema(
+      "{" +
+      "  \"type\": \"record\"," +
+      "  \"name\": \"test\"," +
+      "  \"fields\" : [" +
+      "    {\"name\": \"a\", \"type\": \"long\"}," +
+      "    {\"name\": \"b\", \"type\": \"string\"}" +
+      "  ]" +
+      "}"
+    )
+)
+{% endhighlight %}
+</div>
+
+<div data-lang="YAML" markdown="1">
+{% highlight yaml %}
+format:
+  type: avro
+
+  # required: define the schema either by using an Avro specific record class
+  record-class: "org.organization.types.User"
+
+  # or by using an Avro schema
+  avro-schema: >
+    {
+      "type": "record",
+      "name": "test",
+      "fields" : [
+        {"name": "a", "type": "long"},
+        {"name": "b", "type": "string"}
+      ]
+    }
+{% endhighlight %}
+</div>
+</div>
+
+Avro types are mapped to the corresponding SQL data types. Union types are only supported for specifying nullability; otherwise, they are converted to an `ANY` type. The following table shows the mapping:
+
+| Avro schema                                 | Flink SQL               |
+| :------------------------------------------ | :---------------------- |
+| `record`                                    | `ROW`                   |
+| `enum`                                      | `VARCHAR`               |
+| `array`                                     | `ARRAY[_]`              |
+| `map`                                       | `MAP[VARCHAR, _]`       |
+| `union`                                     | non-null type or `ANY`  |
+| `fixed`                                     | `ARRAY[TINYINT]`        |
+| `string`                                    | `VARCHAR`               |
+| `bytes`                                     | `ARRAY[TINYINT]`        |
+| `int`                                       | `INT`                   |
+| `long`                                      | `BIGINT`                |
+| `float`                                     | `FLOAT`                 |
+| `double`                                    | `DOUBLE`                |
+| `boolean`                                   | `BOOLEAN`               |
+| `int` with `logicalType: date`              | `DATE`                  |
+| `int` with `logicalType: time-millis`       | `TIME`                  |
+| `int` with `logicalType: time-micros`       | `INT`                   |
+| `long` with `logicalType: timestamp-millis` | `TIMESTAMP`             |
+| `long` with `logicalType: timestamp-micros` | `BIGINT`                |
+| `bytes` with `logicalType: decimal`         | `DECIMAL`               |
+| `fixed` with `logicalType: decimal`         | `DECIMAL`               |
+| `null`                                      | `NULL` (unsupported yet)|
+
+Avro uses [Joda-Time](http://www.joda.org/joda-time/) for representing logical date and time types in specific record classes. The Joda-Time dependency is not part of Flink's distribution. Therefore, make sure that Joda-Time is in your classpath together with your specific record class during runtime. Avro formats specified via a schema string do not require Joda-Time to be present.
+
+Make sure to add the Apache Avro dependency.
+
+{% top %}
+
+Further TableSources and TableSinks
+-----------------------------------
+
+The following table sources and sinks have not yet been migrated (or have not been migrated entirely) to the new unified interfaces.
+
+These are the additional `TableSource`s which are provided with Flink:
+
+| **Class name** | **Maven dependency** | **Batch?** | **Streaming?** | **Description**
+| `OrcTableSource` | `flink-orc` | Y | N | A `TableSource` for ORC files.
+
+These are the additional `TableSink`s which are provided with Flink:
+
+| **Class name** | **Maven dependency** | **Batch?** | **Streaming?** | **Description**
+| `CsvTableSink` | `flink-table` | Y | Append | A simple sink for CSV files.
+| `JDBCAppendTableSink` | `flink-jdbc` | Y | Append | Writes a Table to a JDBC table.
+| `CassandraAppendTableSink` | `flink-connector-cassandra` | N | Append | Writes a Table to a Cassandra table. 
+
+### OrcTableSource
+
+The `OrcTableSource` reads [ORC files](https://orc.apache.org). ORC is a file format for structured data and stores the data in a compressed, columnar representation. ORC is very storage efficient and supports projection and filter push-down.
+
+An `OrcTableSource` is created as shown below:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+
+// create Hadoop Configuration
+Configuration config = new Configuration();
+
+OrcTableSource orcTableSource = OrcTableSource.builder()
+  // path to ORC file(s). NOTE: By default, directories are recursively scanned.
+  .path("file:///path/to/data")
+  // schema of ORC files
+  .forOrcSchema("struct<name:string,addresses:array<struct<street:string,zip:smallint>>>")
+  // Hadoop configuration
+  .withConfiguration(config)
+  // build OrcTableSource
+  .build();
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+
+// create Hadoop Configuration
+val config = new Configuration()
+
+val orcTableSource = OrcTableSource.builder()
+  // path to ORC file(s). NOTE: By default, directories are recursively scanned.
+  .path("file:///path/to/data")
+  // schema of ORC files
+  .forOrcSchema("struct<name:string,addresses:array<struct<street:string,zip:smallint>>>")
+  // Hadoop configuration
+  .withConfiguration(config)
+  // build OrcTableSource
+  .build()
+{% endhighlight %}
+</div>
+</div>
+
+**Note:** The `OrcTableSource` does not support ORC's `Union` type yet.
+
+{% top %}
+
+### CsvTableSink
+
+The `CsvTableSink` emits a `Table` to one or more CSV files. 
+
+The sink only supports append-only streaming tables. It cannot be used to emit a `Table` that is continuously updated. See the [documentation on Table to Stream conversions](./streaming.html#table-to-stream-conversion) for details. When emitting a streaming table, rows are written at least once (if checkpointing is enabled) and the `CsvTableSink` does not split output files into bucket files but continuously writes to the same files. 
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+
+Table table = ...
+
+table.writeToSink(
+  new CsvTableSink(
+    path,                  // output path 
+    "|",                   // optional: delimit files by '|'
+    1,                     // optional: write to a single file
+    WriteMode.OVERWRITE)); // optional: override existing files
+
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+
+val table: Table = ???
+
+table.writeToSink(
+  new CsvTableSink(
+    path,                             // output path 
+    fieldDelim = "|",                 // optional: delimit files by '|'
+    numFiles = 1,                     // optional: write to a single file
+    writeMode = WriteMode.OVERWRITE)) // optional: override existing files
+
+{% endhighlight %}
+</div>
+</div>
+
+### JDBCAppendTableSink
+
+The `JDBCAppendTableSink` emits a `Table` to a JDBC connection. The sink only supports append-only streaming tables. It cannot be used to emit a `Table` that is continuously updated. See the [documentation on Table to Stream conversions](./streaming.html#table-to-stream-conversion) for details. 
+
+The `JDBCAppendTableSink` inserts each `Table` row at least once into the database table (if checkpointing is enabled). However, you can specify the insertion query using <code>REPLACE</code> or <code>INSERT OVERWRITE</code> to perform upsert writes to the database.
+
+To use the JDBC sink, you have to add the JDBC connector dependency (<code>flink-jdbc</code>) to your project. Then you can create the sink using <code>JDBCAppendSinkBuilder</code>:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+
+JDBCAppendTableSink sink = JDBCAppendTableSink.builder()
+  .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
+  .setDBUrl("jdbc:derby:memory:ebookshop")
+  .setQuery("INSERT INTO books (id) VALUES (?)")
+  .setParameterTypes(INT_TYPE_INFO)
+  .build();
+
+Table table = ...
+table.writeToSink(sink);
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+val sink: JDBCAppendTableSink = JDBCAppendTableSink.builder()
+  .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
+  .setDBUrl("jdbc:derby:memory:ebookshop")
+  .setQuery("INSERT INTO books (id) VALUES (?)")
+  .setParameterTypes(INT_TYPE_INFO)
+  .build()
+
+val table: Table = ???
+table.writeToSink(sink)
+{% endhighlight %}
+</div>
+</div>
+
+Similar to using <code>JDBCOutputFormat</code>, you have to explicitly specify the name of the JDBC driver, the JDBC URL, the query to be executed, and the field types of the JDBC table. 
+
+{% top %}
+
+### CassandraAppendTableSink
+
+The `CassandraAppendTableSink` emits a `Table` to a Cassandra table. The sink only supports append-only streaming tables. It cannot be used to emit a `Table` that is continuously updated. See the [documentation on Table to Stream conversions](./streaming.html#table-to-stream-conversion) for details. 
+
+The `CassandraAppendTableSink` inserts all rows at least once into the Cassandra table if checkpointing is enabled. However, you can specify the query as an upsert query.
+
+To use the `CassandraAppendTableSink`, you have to add the Cassandra connector dependency (<code>flink-connector-cassandra</code>) to your project. The example below shows how to use the `CassandraAppendTableSink`.
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+
+ClusterBuilder builder = ... // configure Cassandra cluster connection
+
+CassandraAppendTableSink sink = new CassandraAppendTableSink(
+  builder, 
+  // the query must match the schema of the table
+  "INSERT INTO flink.myTable (id, name, value) VALUES (?, ?, ?)");
+
+Table table = ...
+table.writeToSink(sink);
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+val builder: ClusterBuilder = ... // configure Cassandra cluster connection
+
+val sink: CassandraAppendTableSink = new CassandraAppendTableSink(
+  builder, 
+  // the query must match the schema of the table
+  "INSERT INTO flink.myTable (id, name, value) VALUES (?, ?, ?)")
+
+val table: Table = ???
+table.writeToSink(sink)
+{% endhighlight %}
+</div>
+</div>
+
+{% top %}
\ No newline at end of file
diff --git a/docs/dev/table/sourceSinks.md b/docs/dev/table/sourceSinks.md
index 4c5f2e27234..f14e89cf3f0 100644
--- a/docs/dev/table/sourceSinks.md
+++ b/docs/dev/table/sourceSinks.md
@@ -1,5 +1,5 @@
 ---
-title: "Table Sources & Sinks"
+title: "User-defined Sources & Sinks"
 nav-parent_id: tableapi
 nav-pos: 40
 ---
@@ -22,752 +22,17 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-A `TableSource` provides access to data which is stored in external systems (database, key-value store, message queue) or files. After a [TableSource is registered in a TableEnvironment](common.html#register-a-tablesource) it can accessed by [Table API](tableApi.html) or [SQL](sql.html) queries.
+A `TableSource` provides access to data which is stored in external systems (database, key-value store, message queue) or files. After a [TableSource is registered in a TableEnvironment](common.html#register-a-tablesource) it can be accessed by [Table API](tableApi.html) or [SQL](sql.html) queries.
 
-A TableSink [emits a Table](common.html#emit-a-table) to an external storage system, such as a database, key-value store, message queue, or file system (in different encodings, e.g., CSV, Parquet, or ORC). 
+A `TableSink` [emits a Table](common.html#emit-a-table) to an external storage system, such as a database, key-value store, message queue, or file system (in different encodings, e.g., CSV, Parquet, or ORC).
 
-Have a look at the [common concepts and API](common.html) page for details how to [register a TableSource](common.html#register-a-tablesource) and how to [emit a Table through a TableSink](common.html#emit-a-table).
+A `TableFactory` allows for separating the declaration of a connection to an external system from the actual implementation. A table factory creates configured instances of table sources and sinks from normalized, string-based properties. The properties can be generated programmatically using a `Descriptor` or via YAML configuration files for the [SQL Client](sqlClient.html).
+
+Have a look at the [common concepts and API](common.html) page for details on how to [register a TableSource](common.html#register-a-tablesource) and how to [emit a Table through a TableSink](common.html#emit-a-table). See the [built-in sources, sinks, and formats](connect.html) page for examples of how to use factories.
 
 * This will be replaced by the TOC
 {:toc}
 
-Provided TableSources
----------------------
-
-Currently, Flink provides the `CsvTableSource` to read CSV files and a few table sources to read JSON or Avro data from Kafka.
-A custom `TableSource` can be defined by implementing the `BatchTableSource` or `StreamTableSource` interface. See section on [defining a custom TableSource](#define-a-tablesource) for details.
-
-| **Class name** | **Maven dependency** | **Batch?** | **Streaming?** | **Description**
-| `Kafka011AvroTableSource` | `flink-connector-kafka-0.11` | N | Y | A `TableSource` for Avro-encoded Kafka 0.11 topics.
-| `Kafka011JsonTableSource` | `flink-connector-kafka-0.11` | N | Y | A `TableSource` for flat Json-encoded Kafka 0.11 topics.
-| `Kafka010AvroTableSource` | `flink-connector-kafka-0.10` | N | Y | A `TableSource` for Avro-encoded Kafka 0.10 topics.
-| `Kafka010JsonTableSource` | `flink-connector-kafka-0.10` | N | Y | A `TableSource` for flat Json-encoded Kafka 0.10 topics.
-| `Kafka09AvroTableSource` | `flink-connector-kafka-0.9` | N | Y | A `TableSource` for Avro-encoded Kafka 0.9 topics.
-| `Kafka09JsonTableSource` | `flink-connector-kafka-0.9` | N | Y | A `TableSource` for flat Json-encoded Kafka 0.9 topics.
-| `Kafka08AvroTableSource` | `flink-connector-kafka-0.8` | N | Y | A `TableSource` for Avro-encoded Kafka 0.8 topics.
-| `Kafka08JsonTableSource` | `flink-connector-kafka-0.8` | N | Y | A `TableSource` for flat Json-encoded Kafka 0.8 topics.
-| `CsvTableSource` | `flink-table` | Y | Y | A simple `TableSource` for CSV files.
-| `OrcTableSource` | `flink-orc` | Y | N | A `TableSource` for ORC files.
-
-All sources that come with the `flink-table` dependency are directly available for Table API or SQL programs. For all other table sources, you have to add the respective dependency in addition to the `flink-table` dependency.
-
-{% top %}
-
-### KafkaJsonTableSource
-
-A `KafkaJsonTableSource` ingests JSON-encoded messages from a Kafka topic. Currently, only JSON records with flat (non-nested) schema are supported.
-
-A `KafkaJsonTableSource` is created and configured using a builder. The following example shows how to create a `KafkaJsonTableSource` with basic properties:
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-// create builder
-KafkaTableSource source = Kafka010JsonTableSource.builder()
-  // set Kafka topic
-  .forTopic("sensors")
-  // set Kafka consumer properties
-  .withKafkaProperties(kafkaProps)
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG())  
-    .field("temp", Types.DOUBLE())
-    .field("time", Types.SQL_TIMESTAMP()).build())
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-// create builder
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // set Kafka topic
-  .forTopic("sensors")
-  // set Kafka consumer properties
-  .withKafkaProperties(kafkaProps)
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG)
-    .field("temp", Types.DOUBLE)
-    .field("time", Types.SQL_TIMESTAMP).build())
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-#### Optional Configuration
-
-* **Time Attributes:** Please see the sections on [configuring a rowtime attribute](#configure-a-rowtime-attribute) and [configuring a processing time attribute](#configure-a-processing-time-attribute).
-
-* **Explicit JSON parse schema:** By default, the JSON records are parsed with the table schema. You can configure an explicit JSON schema and provide a mapping from table schema fields to JSON fields as shown in the following example.
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-Map<String, String> mapping = new HashMap<>();
-mapping.put("sensorId", "id");
-mapping.put("temperature", "temp");
-
-KafkaTableSource source = Kafka010JsonTableSource.builder()
-  // ...
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG())
-    .field("temperature", Types.DOUBLE()).build())
-  // set JSON parsing schema
-  .forJsonSchema(TableSchema.builder()
-    .field("id", Types.LONG())
-    .field("temp", Types.DOUBLE()).build())
-  // set mapping from table fields to JSON fields
-  .withTableToJsonMapping(mapping)
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // ...
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG)
-    .field("temperature", Types.DOUBLE).build())
-  // set JSON parsing schema
-  .forJsonSchema(TableSchema.builder()
-    .field("id", Types.LONG)
-    .field("temp", Types.DOUBLE).build())
-  // set mapping from table fields to JSON fields
-  .withTableToJsonMapping(Map(
-    "sensorId" -> "id", 
-    "temperature" -> "temp").asJava)
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-* **Missing Field Handling:** By default, a missing JSON field is set to `null`. You can enable strict JSON parsing that will cancel the source (and query) if a field is missing.
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-KafkaTableSource source = Kafka010JsonTableSource.builder()
-  // ...
-  // configure missing field behavior
-  .failOnMissingField(true)
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // ...
-  // configure missing field behavior
-  .failOnMissingField(true)
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-* **Specify the start reading position:** By default, the table source will start reading data from the committed group offsets in Zookeeper or Kafka brokers. You can specify other start positions via the builder's methods, which correspond to the configurations in section [Kafka Consumers Start Position Configuration](../connectors/kafka.html#kafka-consumers-start-position-configuration).
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-KafkaTableSource source = Kafka010JsonTableSource.builder()
-  // ...
-  // start reading from the earliest offset
-  .fromEarliest()
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // ...
-  // start reading from the earliest offset
-  .fromEarliest()
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-{% top %}
-
-### KafkaAvroTableSource
-
-A `KafkaAvroTableSource` ingests Avro-encoded records from a Kafka topic.
-
-A `KafkaAvroTableSource` is created and configured using a builder. The following example shows how to create a `KafkaAvroTableSource` with basic properties:
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-// create builder
-KafkaTableSource source = Kafka010AvroTableSource.builder()
-  // set Kafka topic
-  .forTopic("sensors")
-  // set Kafka consumer properties
-  .withKafkaProperties(kafkaProps)
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG())
-    .field("temp", Types.DOUBLE())
-    .field("time", Types.SQL_TIMESTAMP()).build())
-  // set class of Avro record
-  .forAvroRecordClass(SensorReading.class)
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-// create builder
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // set Kafka topic
-  .forTopic("sensors")
-  // set Kafka consumer properties
-  .withKafkaProperties(kafkaProps)
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG)
-    .field("temp", Types.DOUBLE)
-    .field("time", Types.SQL_TIMESTAMP).build())
-  // set class of Avro record
-  .forAvroRecordClass(classOf[SensorReading])
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-**NOTE:** The specified Avro record class must provide all fields of the table schema with corresponding type.
-
-#### Optional Configuration
-
-* **Time Attributes:** Please see the sections on [configuring a rowtime attribute](#configure-a-rowtime-attribute) and [configuring a processing time attribute](#configure-a-processing-time-attribute).
-
-* **Explicit Schema Field to Avro Mapping:** By default, all fields of the table schema are mapped by name to fields of the Avro records. If the fields in the Avro records have different names, a mapping from table schema fields to Avro fields can be specified.
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-Map<String, String> mapping = new HashMap<>();
-mapping.put("sensorId", "id");
-mapping.put("temperature", "temp");
-
-KafkaTableSource source = Kafka010AvroTableSource.builder()
-  // ...
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG())
-    .field("temperature", Types.DOUBLE()).build())
-  // set class of Avro record with fields [id, temp]
-  .forAvroRecordClass(SensorReading.class)
-  // set mapping from table fields to Avro fields
-  .withTableToAvroMapping(mapping)
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010AvroTableSource.builder()
-  // ...
-  // set Table schema
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG)
-    .field("temperature", Types.DOUBLE).build())
-  // set class of Avro record with fields [id, temp]
-  .forAvroRecordClass(classOf[SensorReading])
-  // set mapping from table fields to Avro fields
-  .withTableToAvroMapping(Map(
-    "sensorId" -> "id", 
-    "temperature" -> "temp").asJava)
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-* **Specify the start reading position:** By default, the table source will start reading data from the committed group offsets in Zookeeper or Kafka brokers. You can specify other start positions via the builder's methods, which correspond to the configurations in section [Kafka Consumers Start Position Configuration](../connectors/kafka.html#kafka-consumers-start-position-configuration).
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-KafkaTableSource source = Kafka010AvroTableSource.builder()
-  // ...
-  // start reading from the earliest offset
-  .fromEarliest()
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010AvroTableSource.builder()
-  // ...
-  // start reading from the earliest offset
-  .fromEarliest()
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-{% top %}
-
-### Configuring a Processing Time Attribute
-
-[Processing time attributes](streaming.html#processing-time) are commonly used in streaming queries. A processing time attribute returns the current wall-clock time of the operator that accesses it. 
-
-Batch queries support processing time attributes as well. However, processing time attributes are initialized with the wall-clock time of the table scan operator and keep this value throughout the query evaluation. 
-
-A table schema field of type `SQL_TIMESTAMP` can be declared as a processing time attribute as shown in the following example.
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-KafkaTableSource source = Kafka010JsonTableSource.builder()
-  // ... 
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG())  
-    .field("temp", Types.DOUBLE())
-    // field "ptime" is of type SQL_TIMESTAMP
-    .field("ptime", Types.SQL_TIMESTAMP()).build())
-  // declare "ptime" as processing time attribute
-  .withProctimeAttribute("ptime")
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // ...
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG)
-    .field("temp", Types.DOUBLE)
-    // field "ptime" is of type SQL_TIMESTAMP
-    .field("ptime", Types.SQL_TIMESTAMP).build())
-  // declare "ptime" as processing time attribute
-  .withProctimeAttribute("ptime")
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-{% top %}
-
-### Configuring a Rowtime Attribute
-
-[Rowtime attributes](streaming.html#event-time) are attributes of type `TIMESTAMP` and handled in a unified way in stream and batch queries.
-
-A table schema field of type `SQL_TIMESTAMP` can be declared as rowtime attribute by specifying 
-
-* the name of the field, 
-* a `TimestampExtractor` that computes the actual value for the attribute (usually from one or more other attributes), and
-* a `WatermarkStrategy` that specifies how watermarks are generated for the the rowtime attribute.
-
-The following example shows how to configure a rowtime attribute.
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-KafkaTableSource source = Kafka010JsonTableSource.builder()
-  // ...
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG())
-    .field("temp", Types.DOUBLE())
-    // field "rtime" is of type SQL_TIMESTAMP
-    .field("rtime", Types.SQL_TIMESTAMP()).build())
-  .withRowtimeAttribute(
-    // "rtime" is rowtime attribute
-    "rtime",
-    // value of "rtime" is extracted from existing field with same name
-    new ExistingField("rtime"),
-    // values of "rtime" are at most out-of-order by 30 seconds
-    new BoundedOutOfOrderWatermarks(30000L))
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // ...
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG)
-    .field("temp", Types.DOUBLE)
-    // field "rtime" is of type SQL_TIMESTAMP
-    .field("rtime", Types.SQL_TIMESTAMP).build())
-  .withRowtimeAttribute(
-    // "rtime" is rowtime attribute
-    "rtime",
-    // value of "rtime" is extracted from existing field with same name
-    new ExistingField("rtime"),
-    // values of "rtime" are at most out-of-order by 30 seconds
-    new BoundedOutOfOrderTimestamps(30000L))
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-#### Extracting Kafka 0.10+ Timestamps into Rowtime Attribute
-
-Since Kafka 0.10, Kafka messages have a timestamp as metadata that specifies when the record was written into the Kafka topic. `KafkaTableSources` can assign Kafka's message timestamp as rowtime attribute as follows: 
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-KafkaTableSource source = Kafka010JsonTableSource.builder()
-  // ...
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG())
-    .field("temp", Types.DOUBLE())
-    // field "rtime" is of type SQL_TIMESTAMP
-    .field("rtime", Types.SQL_TIMESTAMP()).build())
-  // use Kafka timestamp as rowtime attribute
-  .withKafkaTimestampAsRowtimeAttribute()(
-    // "rtime" is rowtime attribute
-    "rtime",
-    // values of "rtime" are ascending
-    new AscendingTimestamps())
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val source: KafkaTableSource = Kafka010JsonTableSource.builder()
-  // ...
-  .withSchema(TableSchema.builder()
-    .field("sensorId", Types.LONG)
-    .field("temp", Types.DOUBLE)
-    // field "rtime" is of type SQL_TIMESTAMP
-    .field("rtime", Types.SQL_TIMESTAMP).build())
-  // use Kafka timestamp as rowtime attribute
-  .withKafkaTimestampAsRowtimeAttribute()(
-    // "rtime" is rowtime attribute
-    "rtime",
-    // values of "rtime" are ascending
-    new AscendingTimestamps())
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-#### Provided TimestampExtractors
-
-Flink provides `TimestampExtractor` implementations for common use cases.
-The following `TimestampExtractor` implementations are currently available:
-
-* `ExistingField(fieldName)`: Extracts the value of a rowtime attribute from an existing `LONG` or `SQL_TIMESTAMP`, or ISO date formatted `STRING` field.
-  * One example of ISO date format would be '2018-05-28 12:34:56.000'.
-* `StreamRecordTimestamp()`: Extracts the value of a rowtime attribute from the timestamp of the `DataStream` `StreamRecord`. Note, this `TimestampExtractor` is not available for batch table sources.
-
-A custom `TimestampExtractor` can be defined by implementing the corresponding interface.
-
-#### Provided WatermarkStrategies
-
-Flink provides `WatermarkStrategy` implementations for common use cases.
-The following `WatermarkStrategy` implementations are currently available:
-
-* `AscendingTimestamps`: A watermark strategy for ascending timestamps. Records with timestamps that are out-of-order will be considered late.
-* `BoundedOutOfOrderTimestamps(delay)`: A watermark strategy for timestamps that are at most out-of-order by the specified delay.
-
-A custom `WatermarkStrategy` can be defined by implementing the corresponding interface.
-
-{% top %}
-
-### CsvTableSource
-
-The `CsvTableSource` is already included in `flink-table` without additional dependencies.
-
-The easiest way to create a `CsvTableSource` is by using the enclosed builder `CsvTableSource.builder()`, the builder has the following methods to configure properties:
-
- - `path(String path)` Sets the path to the CSV file, required.
- - `field(String fieldName, TypeInformation<?> fieldType)` Adds a field with the field name and field type information, can be called multiple times, required. The call order of this method defines also the order of the fields in a row.
- - `fieldDelimiter(String delim)` Sets the field delimiter, `","` by default.
- - `lineDelimiter(String delim)` Sets the line delimiter, `"\n"` by default.
- - `quoteCharacter(Character quote)` Sets the quote character for String values, `null` by default.
- - `commentPrefix(String prefix)` Sets a prefix to indicate comments, `null` by default.
- - `ignoreFirstLine()` Ignore the first line. Disabled by default.
- - `ignoreParseErrors()` Skip records with parse error instead to fail. Throwing an exception by default.
-
-You can create the source as follows:
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-CsvTableSource csvTableSource = CsvTableSource
-    .builder()
-    .path("/path/to/your/file.csv")
-    .field("name", Types.STRING())
-    .field("id", Types.INT())
-    .field("score", Types.DOUBLE())
-    .field("comments", Types.STRING())
-    .fieldDelimiter("#")
-    .lineDelimiter("$")
-    .ignoreFirstLine()
-    .ignoreParseErrors()
-    .commentPrefix("%")
-    .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val csvTableSource = CsvTableSource
-    .builder
-    .path("/path/to/your/file.csv")
-    .field("name", Types.STRING)
-    .field("id", Types.INT)
-    .field("score", Types.DOUBLE)
-    .field("comments", Types.STRING)
-    .fieldDelimiter("#")
-    .lineDelimiter("$")
-    .ignoreFirstLine
-    .ignoreParseErrors
-    .commentPrefix("%")
-    .build
-{% endhighlight %}
-</div>
-</div>
-
-{% top %}
-
-### OrcTableSource
-
-The `OrcTableSource` reads [ORC files](https://orc.apache.org). ORC is a file format for structured data and stores the data in a compressed, columnar representation. ORC is very storage efficient and supports projection and filter push-down.
-
-An `OrcTableSource` is created as shown below:
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-
-// create Hadoop Configuration
-Configuration config = new Configuration();
-
-OrcTableSource orcTableSource = OrcTableSource.builder()
-  // path to ORC file(s). NOTE: By default, directories are recursively scanned.
-  .path("file:///path/to/data")
-  // schema of ORC files
-  .forOrcSchema("struct<name:string,addresses:array<struct<street:string,zip:smallint>>>")
-  // Hadoop configuration
-  .withConfiguration(config)
-  // build OrcTableSource
-  .build();
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-
-// create Hadoop Configuration
-val config = new Configuration()
-
-val orcTableSource = OrcTableSource.builder()
-  // path to ORC file(s). NOTE: By default, directories are recursively scanned.
-  .path("file:///path/to/data")
-  // schema of ORC files
-  .forOrcSchema("struct<name:string,addresses:array<struct<street:string,zip:smallint>>>")
-  // Hadoop configuration
-  .withConfiguration(config)
-  // build OrcTableSource
-  .build()
-{% endhighlight %}
-</div>
-</div>
-
-**Note:** The `OrcTableSource` does not support ORC's `Union` type yet.
-
-{% top %}
-
-Provided TableSinks
--------------------
-
-The following table lists the `TableSink`s which are provided with Flink.
-
-| **Class name** | **Maven dependency** | **Batch?** | **Streaming?** | **Description**
-| `CsvTableSink` | `flink-table` | Y | Append | A simple sink for CSV files.
-| `JDBCAppendTableSink` | `flink-jdbc` | Y | Append | Writes a Table to a JDBC table.
-| `CassandraAppendTableSink` | `flink-connector-cassandra` | N | Append | Writes a Table to a Cassandra table. 
-| `Kafka08JsonTableSink` | `flink-connector-kafka-0.8` | N | Append | A Kafka 0.8 sink with JSON encoding.
-| `Kafka09JsonTableSink` | `flink-connector-kafka-0.9` | N | Append | A Kafka 0.9 sink with JSON encoding.
-| `Kafka010JsonTableSink` | `flink-connector-kafka-0.10` | N | Append | A Kafka 0.10 sink with JSON encoding.
-
-All sinks that come with the `flink-table` dependency can be directly used by your Table programs. For all other table sinks, you have to add the respective dependency in addition to the `flink-table` dependency.
-
-A custom `TableSink` can be defined by implementing the `BatchTableSink`, `AppendStreamTableSink`, `RetractStreamTableSink`, or `UpsertStreamTableSink` interface. See section on [defining a custom TableSink](#define-a-tablesink) for details.
-
-{% top %}
-
-### KafkaJsonTableSink
-
-A `KafkaJsonTableSink` emits a [streaming append `Table`](./streaming.html#table-to-stream-conversion) to an Apache Kafka topic. The rows of the table are encoded as JSON records. Currently, only tables with flat schema, i.e., non-nested fields, are supported. 
-
-A `KafkaJsonTableSink` produces with at-least-once guarantees into a Kafka topic if the query is executed with [checkpointing enabled]({{ site.baseurl }}/dev/stream/state/checkpointing.html#enabling-and-configuring-checkpointing). 
-
-By default, a `KafkaJsonTableSink` writes to at most as many partitions as its own parallelism (each parallel instance of the sink writes to exactly one partition). In order to distribute the writes to more partitions or control the routing of rows into partitions, a custom `FlinkKafkaPartitioner` can be provided.
-
-The following example shows how to create a `KafkaJsonTableSink` for Kafka 0.10. Sinks for Kafka 0.8 and 0.9 are instantiated analogously. 
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-
-Table table = ...
-
-Properties props = new Properties();
-props.setProperty("bootstrap.servers", "localhost:9092");
-
-table.writeToSink(
-  new Kafka010JsonTableSink(
-    "myTopic",                // Kafka topic to write to
-    props));                  // Properties to configure the producer
-
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-
-val table: Table = ???
-
-val props = new Properties()
-props.setProperty("bootstrap.servers", "localhost:9092")
-
-table.writeToSink(
-  new Kafka010JsonTableSink(
-    "myTopic",                // Kafka topic to write to
-    props))                   // Properties to configure the producer
-  
-{% endhighlight %}
-</div>
-</div>
-
-### CsvTableSink
-
-The `CsvTableSink` emits a `Table` to one or more CSV files. 
-
-The sink only supports append-only streaming tables. It cannot be used to emit a `Table` that is continuously updated. See the [documentation on Table to Stream conversions](./streaming.html#table-to-stream-conversion) for details. When emitting a streaming table, rows are written at least once (if checkpointing is enabled) and the `CsvTableSink` does not split output files into bucket files but continuously writes to the same files. 
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-
-Table table = ...
-
-table.writeToSink(
-  new CsvTableSink(
-    path,                  // output path 
-    "|",                   // optional: delimit files by '|'
-    1,                     // optional: write to a single file
-    WriteMode.OVERWRITE)); // optional: override existing files
-
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-
-val table: Table = ???
-
-table.writeToSink(
-  new CsvTableSink(
-    path,                             // output path 
-    fieldDelim = "|",                 // optional: delimit files by '|'
-    numFiles = 1,                     // optional: write to a single file
-    writeMode = WriteMode.OVERWRITE)) // optional: override existing files
-
-{% endhighlight %}
-</div>
-</div>
-
-### JDBCAppendTableSink
-
-The `JDBCAppendTableSink` emits a `Table` to a JDBC connection. The sink only supports append-only streaming tables. It cannot be used to emit a `Table` that is continuously updated. See the [documentation on Table to Stream conversions](./streaming.html#table-to-stream-conversion) for details. 
-
-The `JDBCAppendTableSink` inserts each `Table` row at least once into the database table (if checkpointing is enabled). However, you can specify the insertion query using <code>REPLACE</code> or <code>INSERT OVERWRITE</code> to perform upsert writes to the database.
-
-To use the JDBC sink, you have to add the JDBC connector dependency (<code>flink-jdbc</code>) to your project. Then you can create the sink using <code>JDBCAppendSinkBuilder</code>:
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-
-JDBCAppendTableSink sink = JDBCAppendTableSink.builder()
-  .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
-  .setDBUrl("jdbc:derby:memory:ebookshop")
-  .setQuery("INSERT INTO books (id) VALUES (?)")
-  .setParameterTypes(INT_TYPE_INFO)
-  .build();
-
-Table table = ...
-table.writeToSink(sink);
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val sink: JDBCAppendTableSink = JDBCAppendTableSink.builder()
-  .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
-  .setDBUrl("jdbc:derby:memory:ebookshop")
-  .setQuery("INSERT INTO books (id) VALUES (?)")
-  .setParameterTypes(INT_TYPE_INFO)
-  .build()
-
-val table: Table = ???
-table.writeToSink(sink)
-{% endhighlight %}
-</div>
-</div>
-
-Similar to using <code>JDBCOutputFormat</code>, you have to explicitly specify the name of the JDBC driver, the JDBC URL, the query to be executed, and the field types of the JDBC table. 
-
-{% top %}
-
-### CassandraAppendTableSink
-
-The `CassandraAppendTableSink` emits a `Table` to a Cassandra table. The sink only supports append-only streaming tables. It cannot be used to emit a `Table` that is continuously updated. See the [documentation on Table to Stream conversions](./streaming.html#table-to-stream-conversion) for details. 
-
-The `CassandraAppendTableSink` inserts all rows at least once into the Cassandra table if checkpointing is enabled. However, you can specify the query as upsert query.
-
-To use the `CassandraAppendTableSink`, you have to add the Cassandra connector dependency (<code>flink-connector-cassandra</code>) to your project. The example below shows how to use the `CassandraAppendTableSink`.
-
-<div class="codetabs" markdown="1">
-<div data-lang="java" markdown="1">
-{% highlight java %}
-
-ClusterBuilder builder = ... // configure Cassandra cluster connection
-
-CassandraAppendTableSink sink = new CassandraAppendTableSink(
-  builder, 
-  // the query must match the schema of the table
-  INSERT INTO flink.myTable (id, name, value) VALUES (?, ?, ?));
-
-Table table = ...
-table.writeToSink(sink);
-{% endhighlight %}
-</div>
-
-<div data-lang="scala" markdown="1">
-{% highlight scala %}
-val builder: ClusterBuilder = ... // configure Cassandra cluster connection
-
-val sink: CassandraAppendTableSink = new CassandraAppendTableSink(
-  builder, 
-  // the query must match the schema of the table
-  INSERT INTO flink.myTable (id, name, value) VALUES (?, ?, ?))
-
-val table: Table = ???
-table.writeToSink(sink)
-{% endhighlight %}
-</div>
-</div>
-
-{% top %}
-
 Define a TableSource
 --------------------
 
@@ -823,7 +88,7 @@ The `BatchTableSource` interface extends the `TableSource` interface and defines
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 {% highlight java %}
-BatchTableSource<T> extends TableSource<T> {
+BatchTableSource<T> implements TableSource<T> {
 
   public DataSet<T> getDataSet(ExecutionEnvironment execEnv);
 }
@@ -851,7 +116,7 @@ The `StreamTableSource` interface extends the `TableSource` interface and define
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 {% highlight java %}
-StreamTableSource<T> extends TableSource<T> {
+StreamTableSource<T> implements TableSource<T> {
 
   public DataStream<T> getDataStream(StreamExecutionEnvironment execEnv);
 }
@@ -870,6 +135,8 @@ StreamTableSource[T] extends TableSource[T] {
 
 * `getDataStream(execEnv)`: Returns a `DataStream` with the data of the table. The type of the `DataStream` must be identical to the return type defined by the `TableSource.getReturnType()` method. The `DataStream` can by created using a regular [data source]({{ site.baseurl }}/dev/datastream_api.html#data-sources) of the DataStream API. Commonly, a `StreamTableSource` is implemented by wrapping a `SourceFunction` or a [stream connector]({{ site.baseurl }}/dev/connectors/).
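+
+The following is a minimal sketch of a custom `StreamTableSource` that serves a fixed set of rows. The class name `UserActionSource`, its fields, and the sample rows are purely illustrative; a real implementation would typically wrap a `SourceFunction` or a stream connector.
+
+{% highlight java %}
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.api.Types;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
+
+import java.util.Arrays;
+
+// hypothetical table source with two String fields "user" and "url"
+public class UserActionSource implements StreamTableSource<Row> {
+
+  @Override
+  public DataStream<Row> getDataStream(StreamExecutionEnvironment execEnv) {
+    // serve two hard-coded rows; a real source would wrap a SourceFunction or connector
+    return execEnv.fromCollection(
+      Arrays.asList(
+        Row.of("Alice", "./shop"),
+        Row.of("Bob", "./cart")),
+      getReturnType());
+  }
+
+  @Override
+  public TypeInformation<Row> getReturnType() {
+    return Types.ROW(
+      new String[] {"user", "url"},
+      new TypeInformation<?>[] {Types.STRING(), Types.STRING()});
+  }
+
+  @Override
+  public TableSchema getTableSchema() {
+    return new TableSchema(
+      new String[] {"user", "url"},
+      new TypeInformation<?>[] {Types.STRING(), Types.STRING()});
+  }
+
+  @Override
+  public String explainSource() {
+    return "UserActionSource";
+  }
+}
+{% endhighlight %}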
 
+{% top %}
+
 ### Defining a TableSource with Time Attributes
 
 Time-based operations of streaming [Table API](tableApi.html#group-windows) and [SQL](sql.html#group-windows) queries, such as windowed aggregations or joins, require explicitly specified [time attributes]({{ site.baseurl }}/dev/table/streaming.html#time-attributes). 
@@ -878,7 +145,7 @@ A `TableSource` defines a time attribute as a field of type `Types.SQL_TIMESTAMP
 
 #### Defining a Processing Time Attribute
 
-A `TableSource` defines a [processing time attribute](streaming.html#processing-time) by implementing the `DefinedProctimeAttribute` interface. The interface looks as follows:
+[Processing time attributes](streaming.html#processing-time) are commonly used in streaming queries. A processing time attribute returns the current wall-clock time of the operator that accesses it. A `TableSource` defines a processing time attribute by implementing the `DefinedProctimeAttribute` interface. The interface looks as follows:
 
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
@@ -902,11 +169,19 @@ DefinedProctimeAttribute {
 
 * `getProctimeAttribute()`: Returns the name of the processing time attribute. The specified attribute must be defined of type `Types.SQL_TIMESTAMP` in the table schema and can be used in time-based operations. A `DefinedProctimeAttribute` table source can define no processing time attribute by returning `null`.
 
-**Note** Both `StreamTableSource` and `BatchTableSource` can implement `DefinedProctimeAttribute` and define a processing time attribute. In case of a `BatchTableSource` the processing time field is initialized with the current timestamp during the table scan.
+<span class="label label-danger">Attention</span> Both `StreamTableSource` and `BatchTableSource` can implement `DefinedProctimeAttribute` and define a processing time attribute. In case of a `BatchTableSource` the processing time field is initialized with the current timestamp during the table scan.
 
 #### Defining a Rowtime Attribute
 
-A `TableSource` defines a [rowtime attribute](streaming.html#event-time) by implementing the `DefinedRowtimeAttributes` interface. The interface looks as follows:
+[Rowtime attributes](streaming.html#event-time) are attributes of type `TIMESTAMP` and handled in a unified way in stream and batch queries.
+
+A table schema field of type `SQL_TIMESTAMP` can be declared as a rowtime attribute by specifying 
+
+* the name of the field, 
+* a `TimestampExtractor` that computes the actual value for the attribute (usually from one or more other fields), and
+* a `WatermarkStrategy` that specifies how watermarks are generated for the rowtime attribute.
+
+A `TableSource` defines a rowtime attribute by implementing the `DefinedRowtimeAttributes` interface. The interface looks as follows:
 
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
@@ -932,9 +207,33 @@ DefinedRowtimeAttributes {
   * `attributeName`: The name of the rowtime attribute in the table schema. The field must be defined with type `Types.SQL_TIMESTAMP`.
   * `timestampExtractor`: The timestamp extractor extracts the timestamp from a record with the return type. For example, it can convert a Long field into a timestamp or parse a String-encoded timestamp. Flink comes with a set of built-in `TimestampExtractor` implementations for common use cases. It is also possible to provide a custom implementation.
   * `watermarkStrategy`: The watermark strategy defines how watermarks are generated for the rowtime attribute. Flink comes with a set of built-in `WatermarkStrategy` implementations for common use cases. It is also possible to provide a custom implementation.
-* **Note** Although the `getRowtimeAttributeDescriptors()` method returns a list of descriptors, only a single rowtime attribute is support at the moment. We plan to remove this restriction in the future and support tables with more than one rowtime attribute.
 
-**IMPORTANT** Both, `StreamTableSource` and `BatchTableSource`, can implement `DefinedRowtimeAttributes` and define a rowtime attribute. In either case, the rowtime field is extracted using the `TimestampExtractor`. Hence, a `TableSource` that implements `StreamTableSource` and `BatchTableSource` and defines a rowtime attribute provides exactly the same data to streaming and batch queries.
+<span class="label label-danger">Attention</span> Although the `getRowtimeAttributeDescriptors()` method returns a list of descriptors, only a single rowtime attribute is support at the moment. We plan to remove this restriction in the future and support tables with more than one rowtime attribute.
+
+<span class="label label-danger">Attention</span> Both, `StreamTableSource` and `BatchTableSource`, can implement `DefinedRowtimeAttributes` and define a rowtime attribute. In either case, the rowtime field is extracted using the `TimestampExtractor`. Hence, a `TableSource` that implements `StreamTableSource` and `BatchTableSource` and defines a rowtime attribute provides exactly the same data to streaming and batch queries.
+
+##### Provided Timestamp Extractors
+
+Flink provides `TimestampExtractor` implementations for common use cases.
+
+The following `TimestampExtractor` implementations are currently available:
+
+* `ExistingField(fieldName)`: Extracts the value of a rowtime attribute from an existing `LONG`, `SQL_TIMESTAMP`, or timestamp-formatted `STRING` field. An example of such a string is '2018-05-28 12:34:56.000'.
+* `StreamRecordTimestamp()`: Extracts the value of a rowtime attribute from the timestamp of the `DataStream` `StreamRecord`. Note, this `TimestampExtractor` is not available for batch table sources.
+
+A custom `TimestampExtractor` can be defined by implementing the corresponding interface.
+
+##### Provided Watermark Strategies
+
+Flink provides `WatermarkStrategy` implementations for common use cases.
+
+The following `WatermarkStrategy` implementations are currently available:
+
+* `AscendingTimestamps`: A watermark strategy for ascending timestamps. Records with timestamps that are out-of-order will be considered late.
+* `BoundedOutOfOrderTimestamps(delay)`: A watermark strategy for timestamps that are at most out-of-order by the specified delay.
+* `PreserveWatermarks()`: A strategy which indicates the watermarks should be preserved from the underlying `DataStream`.
+
+A custom `WatermarkStrategy` can be defined by implementing the corresponding interface.
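+
+For illustration, the following variant of the hypothetical `UserActionSource` declares a `user_action_time` field of type `SQL_TIMESTAMP` from its table schema as a rowtime attribute, using the built-in `ExistingField` extractor and `BoundedOutOfOrderTimestamps` strategy. The package names are those of the built-in implementations at the time of writing.
+
+{% highlight java %}
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.flink.table.sources.DefinedRowtimeAttributes;
+import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.table.sources.tsextractors.ExistingField;
+import org.apache.flink.table.sources.wmstrategies.BoundedOutOfOrderTimestamps;
+import org.apache.flink.types.Row;
+
+// hypothetical source whose table schema contains a field "user_action_time"
+// of type SQL_TIMESTAMP that becomes the rowtime attribute
+public abstract class UserActionSource implements StreamTableSource<Row>, DefinedRowtimeAttributes {
+
+  @Override
+  public List<RowtimeAttributeDescriptor> getRowtimeAttributeDescriptors() {
+    RowtimeAttributeDescriptor descriptor = new RowtimeAttributeDescriptor(
+      "user_action_time",                       // name of the rowtime attribute in the table schema
+      new ExistingField("user_action_time"),    // extract the timestamp from the existing field
+      new BoundedOutOfOrderTimestamps(30000L)); // timestamps are at most 30 seconds out-of-order
+    return Collections.singletonList(descriptor);
+  }
+
+  // getDataStream(), getReturnType(), getTableSchema(), and explainSource() as before
+}
+{% endhighlight %}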
 
 {% top %}
 
@@ -1075,7 +374,7 @@ The interface looks as follows:
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 {% highlight java %}
-BatchTableSink<T> extends TableSink<T> {
+BatchTableSink<T> implements TableSink<T> {
 
   public void emitDataSet(DataSet<T> dataSet);
 }
@@ -1103,7 +402,7 @@ The interface looks as follows:
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 {% highlight java %}
-AppendStreamTableSink<T> extends TableSink<T> {
+AppendStreamTableSink<T> implements TableSink<T> {
 
   public void emitDataStream(DataStream<T> dataStream);
 }
@@ -1133,7 +432,7 @@ The interface looks as follows:
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 {% highlight java %}
-RetractStreamTableSink<T> extends TableSink<Tuple2<Boolean, T>> {
+RetractStreamTableSink<T> implements TableSink<Tuple2<Boolean, T>> {
 
   public TypeInformation<T> getRecordType();
 
@@ -1167,7 +466,7 @@ The interface looks as follows:
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 {% highlight java %}
-UpsertStreamTableSink<T> extends TableSink<Tuple2<Boolean, T>> {
+UpsertStreamTableSink<T> implements TableSink<Tuple2<Boolean, T>> {
 
   public void setKeyFields(String[] keys);
 
@@ -1204,3 +503,239 @@ A message with true boolean field is an upsert message for the configured key. A
 
 {% top %}
 
+Define a TableFactory
+---------------------
+
+A `TableFactory` allows for creating different table-related instances from string-based properties. All available factories are considered when searching for a factory that matches both the given set of properties and the requested factory class.
+
+Factories leverage Java's [Service Provider Interfaces (SPI)](https://docs.oracle.com/javase/tutorial/sound/SPI-intro.html) for discovery. This means that every dependency and JAR file should contain a file `org.apache.flink.table.factories.TableFactory` in the `META-INF/services` resource directory that lists all available table factories that it provides.
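+
+For example, a JAR that provides the `MySystemTableSourceFactory` shown later on this page could ship a service file `META-INF/services/org.apache.flink.table.factories.TableFactory` containing a single line such as the following (the package prefix is illustrative):
+
+{% highlight text %}
+com.mycompany.factories.MySystemTableSourceFactory
+{% endhighlight %}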
+
+Every table factory needs to implement the following interface:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+package org.apache.flink.table.factories;
+
+interface TableFactory {
+
+  Map<String, String> requiredContext();
+
+  List<String> supportedProperties();
+}
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+package org.apache.flink.table.factories
+
+trait TableFactory {
+
+  def requiredContext(): util.Map[String, String]
+
+  def supportedProperties(): util.List[String]
+}
+{% endhighlight %}
+</div>
+</div>
+
+* `requiredContext()`: Specifies the context that this factory has been implemented for. The framework guarantees to match this factory only if the specified set of properties and values is present. Typical properties might be `connector.type`, `format.type`, or `update-mode`. Property keys such as `connector.property-version` and `format.property-version` are reserved for future backwards compatibility cases.
+* `supportedProperties()`: List of property keys that this factory can handle. This method is used for validation. If a property is passed that this factory cannot handle, an exception will be thrown. The list must not contain the keys that are specified by the context.
+
+In order to create a specific instance, a factory class can implement one or more interfaces provided in `org.apache.flink.table.factories`:
+
+* `BatchTableSourceFactory`: Creates a batch table source.
+* `BatchTableSinkFactory`: Creates a batch table sink.
+* `StreamTableSourceFactory`: Creates a stream table source.
+* `StreamTableSinkFactory`: Creates a stream table sink.
+* `DeserializationSchemaFactory`: Creates a deserialization schema format.
+* `SerializationSchemaFactory`: Creates a serialization schema format.
+
+The discovery of a factory happens in multiple stages:
+
+- Discover all available factories.
+- Filter by factory class (e.g., `StreamTableSourceFactory`).
+- Filter by matching context.
+- Filter by supported properties.
+- Verify that exactly one factory matches, otherwise throw an `AmbiguousTableFactoryException` or `NoMatchingTableFactoryException`.
+
+The following example shows how to provide a custom streaming source with an additional `connector.debug` property flag for parameterization.
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+import org.apache.flink.table.sources.StreamTableSource;
+import org.apache.flink.types.Row;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+class MySystemTableSourceFactory implements StreamTableSourceFactory<Row> {
+
+  @Override
+  public Map<String, String> requiredContext() {
+    Map<String, String> context = new HashMap<>();
+    context.put("update-mode", "append");
+    context.put("connector.type", "my-system");
+    return context;
+  }
+
+  @Override
+  public List<String> supportedProperties() {
+    List<String> list = new ArrayList<>();
+    list.add("connector.debug");
+    return list;
+  }
+
+  @Override
+  public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
+    boolean isDebug = Boolean.valueOf(properties.get("connector.debug"));
+
+    // additional validation of the passed properties can also happen here
+
+    return new MySystemAppendTableSource(isDebug);
+  }
+}
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+import java.util
+import org.apache.flink.table.sources.StreamTableSource
+import org.apache.flink.types.Row
+
+class MySystemTableSourceFactory extends StreamTableSourceFactory[Row] {
+
+  override def requiredContext(): util.Map[String, String] = {
+    val context = new util.HashMap[String, String]()
+    context.put("update-mode", "append")
+    context.put("connector.type", "my-system")
+    context
+  }
+
+  override def supportedProperties(): util.List[String] = {
+    val properties = new util.ArrayList[String]()
+    properties.add("connector.debug")
+    properties
+  }
+
+  override def createStreamTableSource(properties: util.Map[String, String]): StreamTableSource[Row] = {
+    val isDebug = java.lang.Boolean.valueOf(properties.get("connector.debug"))
+
+    // additional validation of the passed properties can also happen here
+
+    new MySystemAppendTableSource(isDebug)
+  }
+}
+{% endhighlight %}
+</div>
+</div>
+
+{% top %}
+
+### Use a TableFactory in the SQL Client
+
+In a SQL Client environment file, the previously presented factory could be declared as:
+
+{% highlight yaml %}
+tables:
+ - name: MySystemTable
+   type: source
+   update-mode: append
+   connector:
+     type: my-system
+     debug: true
+{% endhighlight %}
+
+The YAML file is translated into flattened string properties and a table factory is called with those properties that describe the connection to the external system:
+
+{% highlight text %}
+update-mode=append
+connector.type=my-system
+connector.debug=true
+{% endhighlight %}
+
+<span class="label label-danger">Attention</span> Properties such as `tables.#.name` or `tables.#.type` are SQL Client specifics and are not passed to any factory. The `type` property decides, depending on the execution environment, whether a `BatchTableSourceFactory`/`StreamTableSourceFactory` (for `source`), a `BatchTableSinkFactory`/`StreamTableSinkFactory` (for `sink`), or both (for `both`) need to discovered.
+
+{% top %}
+
+### Use a TableFactory in the Table & SQL API
+
+For a type-safe, programmatic approach with explanatory Scaladoc/Javadoc, the Table & SQL API offers descriptors in `org.apache.flink.table.descriptors` that translate into string-based properties. See the [built-in descriptors](connect.html) for sources, sinks, and formats as a reference.
+
+A connector for `MySystem` in our example can extend `ConnectorDescriptor` as shown below:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+import org.apache.flink.table.descriptors.ConnectorDescriptor;
+import org.apache.flink.table.descriptors.DescriptorProperties;
+
+/**
+  * Connector to MySystem with debug mode.
+  */
+public class MySystemConnector extends ConnectorDescriptor {
+
+  public final boolean isDebug;
+
+  public MySystemConnector(boolean isDebug) {
+    super("my-system", 1, false);
+    this.isDebug = isDebug;
+  }
+
+  @Override
+  public void addConnectorProperties(DescriptorProperties properties) {
+    properties.putString("connector.debug", Boolean.toString(isDebug));
+  }
+}
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+import org.apache.flink.table.descriptors.ConnectorDescriptor
+import org.apache.flink.table.descriptors.DescriptorProperties
+
+/**
+  * Connector to MySystem with debug mode.
+  */
+class MySystemConnector(isDebug: Boolean) extends ConnectorDescriptor("my-system", 1, formatNeeded = false) {
+  
+  override protected def addConnectorProperties(properties: DescriptorProperties): Unit = {
+    properties.putString("connector.debug", isDebug.toString)
+  }
+}
+{% endhighlight %}
+</div>
+</div>
+
+The descriptor can then be used in the API as follows:
+
+<div class="codetabs" markdown="1">
+<div data-lang="java" markdown="1">
+{% highlight java %}
+StreamTableEnvironment tableEnv = // ...
+
+tableEnv
+  .connect(new MySystemConnector(true))
+  .inAppendMode()
+  .registerTableSource("MySystemTable");
+{% endhighlight %}
+</div>
+
+<div data-lang="scala" markdown="1">
+{% highlight scala %}
+val tableEnv: StreamTableEnvironment = // ...
+
+tableEnv
+  .connect(new MySystemConnector(isDebug = true))
+  .inAppendMode()
+  .registerTableSource("MySystemTable")
+{% endhighlight %}
+</div>
+</div>
+
+{% top %}
diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index 57e0ba5b781..33c9aedad52 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -48,7 +48,7 @@ StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
 DataStream<Tuple3<Long, String, Integer>> ds = env.addSource(...);
 
 // SQL query with an inlined (unregistered) table
-Table table = tableEnv.toTable(ds, "user, product, amount");
+Table table = tableEnv.fromDataStream(ds, "user, product, amount");
 Table result = tableEnv.sqlQuery(
   "SELECT SUM(amount) FROM " + table + " WHERE product LIKE '%Rubber%'");
 
@@ -478,7 +478,7 @@ FROM Orders CROSS JOIN UNNEST(tags) AS t (tag)
     </tr>
     <tr>
     	<td>
-        <strong>Join with User Defined Table Functions (UDTF)</strong><br>
+        <strong>Join with Table Function</strong><br>
         <span class="label label-primary">Batch</span> <span class="label label-primary">Streaming</span>
       </td>
     	<td>
@@ -2222,7 +2222,7 @@ COUNT(value [, value]* )
 {% endhighlight %}
       </td>
       <td>
-        <p>Returns the number of input rows for which <i>value</i> is not null.</p>
+        <p>Returns the number of input rows for which <i>value</i> is not null. Use <code>COUNT(DISTINCT value)</code> for the number of unique values in the column or expression.</p>
       </td>
     </tr>
 
@@ -2255,7 +2255,7 @@ SUM(numeric)
 {% endhighlight %}
       </td>
       <td>
-        <p>Returns the sum of <i>numeric</i> across all input values.</p>
+        <p>Returns the sum of <i>numeric</i> across all input values. Use <code>SUM(DISTINCT value)</code> for the sum of unique values in the column or expression.</p>
       </td>
     </tr>
 
@@ -2645,7 +2645,6 @@ The following functions are not supported yet:
 
 - Binary string operators and functions
 - System functions
-- Distinct aggregate functions like COUNT DISTINCT
 
 {% top %}
 
diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index 24af6557dc6..c181d24461d 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -106,7 +106,9 @@ Alice, 1
 Greg, 1
 {% endhighlight %}
 
-The [configuration section](sqlClient.html#configuration) explains how to read from table sources and configure other table program properties.
+Both result modes can be useful during the prototyping of SQL queries. In both modes, results are stored in the Java heap memory of the SQL Client. In order to keep the CLI responsive, the changelog mode only shows the latest 1000 changes. The table mode allows for navigating through bigger results that are only limited by the available main memory and the configured [maximum number of rows](sqlClient.html#configuration) (`max-table-result-rows`).
+
+After a query is defined, it can be submitted to the cluster as a long-running, detached Flink job. For this, a target system that stores the results needs to be specified using the [INSERT INTO statement](sqlClient.html#detached-sql-queries). The [configuration section](sqlClient.html#configuration) explains how to declare table sources for reading data, how to declare table sinks for writing data, and how to configure other table program properties.
 
 {% top %}
 
@@ -161,13 +163,9 @@ Every environment file is a regular [YAML file](http://yaml.org/). An example of
 # Define table sources here.
 
 tables:
-  - name: MyTableName
+  - name: MyTableSource
     type: source
-    schema:
-      - name: MyField1
-        type: INT
-      - name: MyField2
-        type: VARCHAR
+    update-mode: append
     connector:
       type: filesystem
       path: "/path/to/something.csv"
@@ -180,12 +178,30 @@ tables:
           type: VARCHAR
       line-delimiter: "\n"
       comment-prefix: "#"
+    schema:
+      - name: MyField1
+        type: INT
+      - name: MyField2
+        type: VARCHAR
+
+# Define user-defined functions here.
+
+functions:
+  - name: myUDF
+    from: class
+    class: foo.bar.AggregateUDF
+    constructor:
+      - 7.6
+      - false
 
 # Execution properties allow for changing the behavior of a table program.
 
 execution:
   type: streaming                   # required: execution mode either 'batch' or 'streaming'
   result-mode: table                # required: either 'table' or 'changelog'
+  max-table-result-rows: 1000000    # optional: maximum number of maintained rows in
+                                    #   'table' mode (1000000 by default, a value smaller than 1 means unlimited)
+                                    #   (from Flink 1.6.1)
   time-characteristic: event-time   # optional: 'processing-time' or 'event-time' (default)
   parallelism: 1                    # optional: Flink's parallelism (1 by default)
   periodic-watermarks-interval: 200 # optional: interval for periodic watermarks (200 ms by default)
@@ -202,6 +218,7 @@ deployment:
 This configuration:
 
 - defines an environment with a table source `MyTableName` that reads from a CSV file,
+- defines a user-defined function `myUDF` that can be instantiated using the class name and two constructor parameters,
 - specifies a parallelism of 1 for queries executed in this streaming environment,
 - specifies an event-time characteristic, and
 - runs queries in the `table` result mode.
@@ -222,50 +239,32 @@ Queries that are executed in a batch environment, can only be retrieved using th
 
 The SQL Client does not require to setup a Java project using Maven or SBT. Instead, you can pass the dependencies as regular JAR files that get submitted to the cluster. You can either specify each JAR file separately (using `--jar`) or define entire library directories (using `--library`). For connectors to external systems (such as Apache Kafka) and corresponding data formats (such as JSON), Flink provides **ready-to-use JAR bundles**. These JAR files are suffixed with `sql-jar` and can be downloaded for each release from the Maven central repository.
 
-{% if site.is_stable %}
-
-#### Connectors
+The full list of offered SQL JARs and documentation about how to use them can be found on the [connection to external systems page](connect.html).
 
-| Name              | Version       | Download               |
-| :---------------- | :------------ | :--------------------- |
-| Filesystem        |               | Built-in               |
-| Apache Kafka      | 0.9           | [Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.9{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.9{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar) |
-| Apache Kafka      | 0.10          | [Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.10{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.10{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar) |
-| Apache Kafka      | 0.11          | [Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.11{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.11{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar) |
-
-#### Formats
-
-| Name              | Download               |
-| :---------------- | :--------------------- |
-| CSV               | Built-in               |
-| JSON              | [Download](http://central.maven.org/maven2/org/apache/flink/flink-json/{{site.version}}/flink-json-{{site.version}}-sql-jar.jar) |
-| Apache Avro       | [Download](http://central.maven.org/maven2/org/apache/flink/flink-avro/{{site.version}}/flink-avro-{{site.version}}-sql-jar.jar) |
-
-{% endif %}
-
-{% top %}
-
-Table Sources
--------------
-
-Sources are defined using a set of [YAML properties](http://yaml.org/). Similar to a SQL `CREATE TABLE` statement you define the name of the table, the final schema of the table, connector, and a data format if necessary. Additionally, you have to specify its type (source, sink, or both).
-
-{% highlight yaml %}
-name: MyTable     # required: string representing the table name
-type: source      # required: currently only 'source' is supported
-schema: ...       # required: final table schema
-connector: ...    # required: connector configuration
-format: ...       # optional: format that depends on the connector
-{% endhighlight %}
-
-<span class="label label-danger">Attention</span> Not every combination of connector and format is supported. Internally, your YAML file is translated into a set of string-based properties by which the SQL Client tries to resolve a matching table source. If a table source can be resolved also depends on the JAR files available in the classpath.
-
-The following example shows an environment file that defines a table source reading JSON data from Apache Kafka. All properties are explained in the following subsections. 
+The following example shows an environment file that defines a table source reading JSON data from Apache Kafka.
 
 {% highlight yaml %}
 tables:
   - name: TaxiRides
     type: source
+    update-mode: append
+    connector:
+      property-version: 1
+      type: kafka
+      version: "0.11"
+      topic: TaxiRides
+      startup-mode: earliest-offset
+      properties:
+        - key: zookeeper.connect
+          value: localhost:2181
+        - key: bootstrap.servers
+          value: localhost:9092
+        - key: group.id
+          value: testGroup
+    format:
+      property-version: 1
+      type: json
+      schema: "ROW<rideId LONG, lon FLOAT, lat FLOAT, rideTime TIMESTAMP>"
     schema:
       - name: rideId
         type: LONG
@@ -285,351 +284,150 @@ tables:
       - name: procTime
         type: TIMESTAMP
         proctime: true
-    connector:
-      property-version: 1
-      type: kafka
-      version: 0.11
-      topic: TaxiRides
-      startup-mode: earliest-offset
-      properties:
-        - key: zookeeper.connect
-          value: localhost:2181
-        - key: bootstrap.servers
-          value: localhost:9092
-        - key: group.id
-          value: testGroup
-    format:
-      property-version: 1
-      type: json
-      schema: "ROW(rideId LONG, lon FLOAT, lat FLOAT, rideTime TIMESTAMP)"
 {% endhighlight %}
 
-The resulting schema of the `TaxiRide` table contains most of the fields of the JSON schema. Furthermore, it adds a rowtime attribute `rowTime` and processing-time attribute `procTime`. Both `connector` and `format` allow to define a property version (which is currently version `1`) for future backwards compatibility.
-
-{% top %}
+The resulting schema of the `TaxiRide` table contains most of the fields of the JSON schema. Furthermore, it adds a rowtime attribute `rowTime` and processing-time attribute `procTime`.
 
-### Schema Properties
+Both `connector` and `format` allow for defining a property version (which is currently version `1`) for future backwards compatibility.
 
-The schema allows for describing the final appearance of a table. It specifies the final name, final type, and the origin of a field. The origin of a field might be important if the name of the field should differ from the input format. For instance, a field `name&field` should reference `nameField` from an Avro format.
+{% top %}
 
-{% highlight yaml %}
-schema:
-  - name: MyField1
-    type: ...
-  - name: MyField2
-    type: ...
-  - name: MyField3
-    type: ...
-{% endhighlight %}
+### User-defined Functions
 
-For *each field*, the following properties can be used:
+The SQL Client allows users to create custom, user-defined functions to be used in SQL queries. Currently, these functions are restricted to being defined programmatically in Java/Scala classes.
 
-{% highlight yaml %}
-name: ...         # required: final name of the field
-type: ...         # required: final type of the field represented as a type string
-proctime: ...     # optional: boolean flag whether this field should be a processing-time attribute
-rowtime: ...      # optional: wether this field should be a event-time attribute
-from: ...         # optional: original field in the input that is referenced/aliased by this field
-{% endhighlight %}
+In order to provide a user-defined function, you need to first implement and compile a function class that extends `ScalarFunction`, `AggregateFunction` or `TableFunction` (see [User-defined Functions]({{ site.baseurl }}/dev/table/udfs.html)). One or more functions can then be packaged into a dependency JAR for the SQL Client.
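+
+The following is a minimal sketch of such a function class (the package and class names are illustrative, not part of Flink); compiled and packaged into a JAR, it can be passed to the SQL Client:
+
+{% highlight java %}
+package com.example.udfs; // hypothetical package
+
+import org.apache.flink.table.functions.ScalarFunction;
+
+// A simple scalar function with a constructor parameter; the constructor
+// argument can later be supplied via the `constructor` property in the
+// environment file (see below).
+public class HashCode extends ScalarFunction {
+
+  private final int factor;
+
+  public HashCode(int factor) {
+    this.factor = factor;
+  }
+
+  public int eval(String s) {
+    return s.hashCode() * factor;
+  }
+}
+{% endhighlight %}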
 
-#### Type Strings
+All functions must be declared in an environment file before being called. For each item in the list of `functions`, one must specify
 
-The following type strings are supported for being defined in an environment file:
+- a `name` under which the function is registered,
+- the source of the function using `from` (restricted to be `class` for now),
+- the `class` which indicates the fully qualified class name of the function and an optional list of `constructor` parameters for instantiation.
 
 {% highlight yaml %}
-VARCHAR
-BOOLEAN
-TINYINT
-SMALLINT
-INT
-BIGINT
-FLOAT
-DOUBLE
-DECIMAL
-DATE
-TIME
-TIMESTAMP
-ROW(fieldtype, ...)              # unnamed row; e.g. ROW(VARCHAR, INT) that is mapped to Flink's RowTypeInfo
-                                 # with indexed fields names f0, f1, ...
-ROW(fieldname fieldtype, ...)    # named row; e.g., ROW(myField VARCHAR, myOtherField INT) that
-                                 # is mapped to Flink's RowTypeInfo
-POJO(class)                      # e.g., POJO(org.mycompany.MyPojoClass) that is mapped to Flink's PojoTypeInfo
-ANY(class)                       # e.g., ANY(org.mycompany.MyClass) that is mapped to Flink's GenericTypeInfo
-ANY(class, serialized)           # used for type information that is not supported by Flink's Table & SQL API
+functions:
+  - name: ...               # required: name of the function
+    from: class             # required: source of the function (can only be "class" for now)
+    class: ...              # required: fully qualified class name of the function
+    constructor:            # optional: constructor parameters of the function class
+      - ...                 # optional: a literal parameter with implicit type
+      - class: ...          # optional: full class name of the parameter
+        constructor:        # optional: constructor parameters of the parameter's class
+          - type: ...       # optional: type of the literal parameter
+            value: ...      # optional: value of the literal parameter
 {% endhighlight %}
 
-#### Rowtime Properties
+Make sure that the order and types of the specified parameters strictly match one of the constructors of your function class.
 
-In order to control the event-time behavior for table sources, the SQL Client provides predefined timestamp extractors and watermark strategies. For more information about time handling in Flink and especially event-time, we recommend the general [event-time section](streaming.html#time-attributes). 
+#### Constructor Parameters
 
-The following timestamp extractors are supported:
+Depending on the user-defined function, it might be necessary to parameterize the implementation before using it in SQL statements.
 
-{% highlight yaml %}
-# Converts an existing BIGINT or TIMESTAMP field in the input into the rowtime attribute.
-rowtime:
-  timestamps:
-    type: from-field
-    from: ...                 # required: original field name in the input
-
-# Converts the assigned timestamps from a DataStream API record into the rowtime attribute 
-# and thus preserves the assigned timestamps from the source.
-rowtime:
-  timestamps:
-    type: from-source
-{% endhighlight %}
+As shown in the example above, when declaring a user-defined function, a class can be configured using constructor parameters in one of the following three ways:
 
-The following watermark strategies are supported:
+**A literal value with implicit type:** The SQL Client will automatically derive the type according to the literal value itself. Currently, only values of `BOOLEAN`, `INT`, `DOUBLE` and `VARCHAR` are supported here.
+If the automatic derivation does not work as expected (e.g., you need the literal `false` to be treated as a VARCHAR rather than a BOOLEAN), use explicit types instead.
 
 {% highlight yaml %}
-# Sets a watermark strategy for ascending rowtime attributes. Emits a watermark of the maximum 
-# observed timestamp so far minus 1. Rows that have a timestamp equal to the max timestamp
-# are not late.
-rowtime:
-  watermarks:
-    type: periodic-ascending
-
-# Sets a built-in watermark strategy for rowtime attributes which are out-of-order by a bounded time interval.
-# Emits watermarks which are the maximum observed timestamp minus the specified delay.
-rowtime:
-  watermarks:
-    type: periodic-bounded
-    delay: ...                # required: delay in milliseconds
-
-# Sets a built-in watermark strategy which indicates the watermarks should be preserved from the
-# underlying DataStream API and thus preserves the assigned watermarks from the source.
-rowtime:
-  watermarks:
-    type: from-source
+- true         # -> BOOLEAN (case sensitive)
+- 42           # -> INT
+- 1234.222     # -> DOUBLE
+- foo          # -> VARCHAR
 {% endhighlight %}
 
-{% top %}
-
-### Connector Properties
-
-Flink provides a set of connectors that can be defined in the environment file.
-
-<span class="label label-danger">Attention</span> Currently, connectors can only be used as table sources not sinks.
-
-#### Filesystem Connector
-
-The filesystem connector allows for reading from a local or distributed filesystem. A filesystem can be defined as:
+**A literal value with explicit type:** Explicitly declare the parameter with `type` and `value` properties for type-safety.
 
 {% highlight yaml %}
-connector:
-  type: filesystem
-  path: "file:///path/to/whatever"       # required
+- type: DECIMAL
+  value: 11111111111111111
 {% endhighlight %}
 
-Currently, only files with CSV format can be read from a filesystem. The filesystem connector is included in Flink and does not require an additional JAR file.
+The table below illustrates the supported Java parameter types and the corresponding SQL type strings.
+
+| Java type               |  SQL type         |
+| :---------------------- | :---------------- |
+| `java.math.BigDecimal`  | `DECIMAL`         |
+| `java.lang.Boolean`     | `BOOLEAN`         |
+| `java.lang.Byte`        | `TINYINT`         |
+| `java.lang.Double`      | `DOUBLE`          |
+| `java.lang.Float`       | `REAL`, `FLOAT`   |
+| `java.lang.Integer`     | `INTEGER`, `INT`  |
+| `java.lang.Long`        | `BIGINT`          |
+| `java.lang.Short`       | `SMALLINT`        |
+| `java.lang.String`      | `VARCHAR`         |
 
-#### Kafka Connector
+More types (e.g., `TIMESTAMP` or `ARRAY`), primitive types, and `null` are not supported yet.
 
-The Kafka connector allows for reading from a Apache Kafka topic. It can be defined as follows:
+**A (nested) class instance:** Besides literal values, you can also create (nested) class instances for constructor parameters by specifying the `class` and `constructor` properties.
+This process can be performed recursively until all constructor parameters are represented by literal values.
 
 {% highlight yaml %}
-connector:
-  type: kafka
-  version: 0.11       # required: valid connector versions are "0.8", "0.9", "0.10", and "0.11"
-  topic: ...          # required: topic name from which the table is read
-  startup-mode: ...   # optional: valid modes are "earliest-offset", "latest-offset",
-                      # "group-offsets", or "specific-offsets"
-  specific-offsets:   # optional: used in case of startup mode with specific offsets
-    - partition: 0
-      offset: 42
-    - partition: 1
-      offset: 300
-  properties:         # optional: connector specific properties
-    - key: zookeeper.connect
-      value: localhost:2181
-    - key: bootstrap.servers
-      value: localhost:9092
-    - key: group.id
-      value: testGroup
+- class: foo.bar.paramClass
+  constructor:
+    - StarryName
+    - class: java.lang.Integer
+      constructor:
+        - class: java.lang.String
+          constructor:
+            - type: VARCHAR
+              value: 3
 {% endhighlight %}
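+
+For illustration, a (hypothetical) `foo.bar.paramClass` matching the nested example above would need a constructor whose parameter order and types correspond to the declaration, along these lines:
+
+{% highlight java %}
+package foo.bar; // hypothetical package taken from the example above
+
+// The first constructor argument receives the implicit VARCHAR literal
+// "StarryName"; the second receives a java.lang.Integer that is built from
+// a java.lang.String created from the explicit VARCHAR literal "3".
+public class paramClass { // class name kept lowercase to match the example
+
+  private final String name;
+  private final Integer value;
+
+  public paramClass(String name, Integer value) {
+    this.name = name;
+    this.value = value;
+  }
+}
+{% endhighlight %}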
 
-Make sure to download the [Kafka SQL JAR](sqlClient.html#dependencies) file and pass it to the SQL Client.
-
 {% top %}
 
-### Format Properties
-
-Flink provides a set of formats that can be defined in the environment file.
-
-#### CSV Format
+Detached SQL Queries
+------------------------
 
-The CSV format allows to read comma-separated rows. Currently, this is only supported for the filesystem connector.
+In order to define end-to-end SQL pipelines, SQL's `INSERT INTO` statement can be used for submitting long-running, detached queries to a Flink cluster. These queries emit their results to an external system instead of returning them to the SQL Client, which allows for handling higher parallelism and larger amounts of data. The CLI itself has no control over a detached query after submission.
 
-{% highlight yaml %}
-format:
-  type: csv
-  fields:                    # required: format fields
-    - name: field1
-      type: VARCHAR
-    - name: field2
-      type: TIMESTAMP
-  field-delimiter: ","      # optional: string delimiter "," by default 
-  line-delimiter: "\n"       # optional: string delimiter "\n" by default 
-  quote-character: '"'       # optional: single character for string values, empty by default
-  comment-prefix: '#'        # optional: string to indicate comments, empty by default
-  ignore-first-line: false   # optional: boolean flag to ignore the first line, by default it is not skipped
-  ignore-parse-errors: true  # optional: skip records with parse error instead to fail by default
+{% highlight sql %}
+INSERT INTO MyTableSink SELECT * FROM MyTableSource
 {% endhighlight %}
 
-The CSV format is included in Flink and does not require an additional JAR file.
-
-#### JSON Format
-
-The JSON format allows to read and write JSON data that corresponds to a given format schema. The format schema can be defined either as a Flink [type string](sqlClient.html#type-strings), as a JSON schema, or derived from the desired table schema. A type string enables a more SQL-like definition and mapping to the corresponding SQL data types. The JSON schema allows for more complex and nested structures.
-
-If the format schema is equal to the table schema, the schema can also be automatically derived. This allows for defining schema information only once. The names, types, and field order of the format are determined by the table's schema. Time attributes are ignored. A `from` definition in the table schema is interpreted as a field renaming in the format.
+The table sink `MyTableSink` has to be declared in the environment file. See the [connection page](connect.html) for more information about supported external systems and their configuration. An example of an Apache Kafka table sink is shown below.
 
 {% highlight yaml %}
-format:
-  type: json
-  fail-on-missing-field: true   # optional: flag whether to fail if a field is missing or not 
-
-  # required: define the schema either by using a type string which parses numbers to corresponding types
-  schema: "ROW(lon FLOAT, rideTime TIMESTAMP)"
-
-  # or by using a JSON schema which parses to DECIMAL and TIMESTAMP
-  json-schema: >
-    {
-      type: 'object',
-      properties: {
-        lon: {
-          type: 'number'
-        },
-        rideTime: {
-          type: 'string',
-          format: 'date-time'
-        }
-      }
-    }
-
-  # or use the tables schema
-  derive-schema: true
-{% endhighlight %}
-
-The following table shows the mapping of JSON schema types to Flink SQL types:
-
-| JSON schema                       | Flink SQL               |
-| :-------------------------------- | :---------------------- |
-| `object`                          | `ROW`                   |
-| `boolean`                         | `BOOLEAN`               |
-| `array`                           | `ARRAY[_]`              |
-| `number`                          | `DECIMAL`               |
-| `integer`                         | `DECIMAL`               |
-| `string`                          | `VARCHAR`               |
-| `string` with `format: date-time` | `TIMESTAMP`             |
-| `string` with `format: date`      | `DATE`                  |
-| `string` with `format: time`      | `TIME`                  |
-| `string` with `encoding: base64`  | `ARRAY[TINYINT]`        |
-| `null`                            | `NULL` (unsupported yet)|
-
-
-Currently, Flink supports only a subset of the [JSON schema specification](http://json-schema.org/) `draft-07`. Union types (as well as `allOf`, `anyOf`, `not`) are not supported yet. `oneOf` and arrays of types are only supported for specifying nullability.
-
-Simple references that link to a common definition in the document are supported as shown in the more complex example below:
-
-{% highlight json %}
-{
-  "definitions": {
-    "address": {
-      "type": "object",
-      "properties": {
-        "street_address": {
-          "type": "string"
-        },
-        "city": {
-          "type": "string"
-        },
-        "state": {
-          "type": "string"
-        }
-      },
-      "required": [
-        "street_address",
-        "city",
-        "state"
-      ]
-    }
-  },
-  "type": "object",
-  "properties": {
-    "billing_address": {
-      "$ref": "#/definitions/address"
-    },
-    "shipping_address": {
-      "$ref": "#/definitions/address"
-    },
-    "optional_address": {
-      "oneOf": [
-        {
-          "type": "null"
-        },
-        {
-          "$ref": "#/definitions/address"
-        }
-      ]
-    }
-  }
-}
+tables:
+  - name: MyTableSink
+    type: sink
+    update-mode: append
+    connector:
+      property-version: 1
+      type: kafka
+      version: "0.11"
+      topic: OutputTopic
+      properties:
+        - key: zookeeper.connect
+          value: localhost:2181
+        - key: bootstrap.servers
+          value: localhost:9092
+        - key: group.id
+          value: testGroup
+    format:
+      property-version: 1
+      type: json
+      derive-schema: true
+    schema:
+      - name: rideId
+        type: LONG
+      - name: lon
+        type: FLOAT
+      - name: lat
+        type: FLOAT
+      - name: rideTime
+        type: TIMESTAMP
 {% endhighlight %}
 
-Make sure to download the [JSON SQL JAR](sqlClient.html#dependencies) file and pass it to the SQL Client.
-
-#### Apache Avro Format
+The SQL Client makes sure that a statement is successfully submitted to the cluster. Once the query is submitted, the CLI will show information about the Flink job.
 
-The [Apache Avro](https://avro.apache.org/) format allows to read and write Avro data that corresponds to a given format schema. The format schema can be defined either as a fully qualified class name of an Avro specific record or as an Avro schema string. If a class name is used, the class must be available in the classpath during runtime.
-
-{% highlight yaml %}
-format:
-  type: avro
-
-  # required: define the schema either by using an Avro specific record class
-  record-class: "org.organization.types.User"
-
-  # or by using an Avro schema
-  avro-schema: >
-    {
-      "type": "record",
-      "name": "test",
-      "fields" : [
-        {"name": "a", "type": "long"},
-        {"name": "b", "type": "string"}
-      ]
-    }
+{% highlight text %}
+[INFO] Table update statement has been successfully submitted to the cluster:
+Cluster ID: StandaloneClusterId
+Job ID: 6f922fe5cba87406ff23ae4a7bb79044
+Web interface: http://localhost:8081
 {% endhighlight %}
 
-Avro types are mapped to the corresponding SQL data types. Union types are only supported for specifying nullability otherwise they are converted to an `ANY` type. The following table shows the mapping:
-
-| Avro schema                                 | Flink SQL               |
-| :------------------------------------------ | :---------------------- |
-| `record`                                    | `ROW`                   |
-| `enum`                                      | `VARCHAR`               |
-| `array`                                     | `ARRAY[_]`              |
-| `map`                                       | `MAP[VARCHAR, _]`       |
-| `union`                                     | non-null type or `ANY`  |
-| `fixed`                                     | `ARRAY[TINYINT]`        |
-| `string`                                    | `VARCHAR`               |
-| `bytes`                                     | `ARRAY[TINYINT]`        |
-| `int`                                       | `INT`                   |
-| `long`                                      | `BIGINT`                |
-| `float`                                     | `FLOAT`                 |
-| `double`                                    | `DOUBLE`                |
-| `boolean`                                   | `BOOLEAN`               |
-| `int` with `logicalType: date`              | `DATE`                  |
-| `int` with `logicalType: time-millis`       | `TIME`                  |
-| `int` with `logicalType: time-micros`       | `INT`                   |
-| `long` with `logicalType: timestamp-millis` | `TIMESTAMP`             |
-| `long` with `logicalType: timestamp-micros` | `BIGINT`                |
-| `bytes` with `logicalType: decimal`         | `DECIMAL`               |
-| `fixed` with `logicalType: decimal`         | `DECIMAL`               |
-| `null`                                      | `NULL` (unsupported yet)|
-
-Avro uses [Joda-Time](http://www.joda.org/joda-time/) for representing logical date and time types in specific record classes. The Joda-Time dependency is not part of Flink's SQL JAR distribution. Therefore, make sure that Joda-Time is in your classpath together with your specific record class during runtime. Avro formats specified via a schema string do not require Joda-Time to be present.
-
-Make sure to download the [Apache Avro SQL JAR](sqlClient.html#dependencies) file and pass it to the SQL Client.
+<span class="label label-danger">Attention</span> The SQL Client does not track the status of the running Flink job after submission. The CLI process can be shut down after the submission without affecting the detached query. Flink's [restart strategy]({{ site.baseurl }}/dev/restart_strategies.html) takes care of fault tolerance. A query can be cancelled using Flink's web interface, command-line, or REST API.
 
 {% top %}
 
diff --git a/docs/dev/table/tableApi.md b/docs/dev/table/tableApi.md
index 44436c4bb64..5eeb86bcbbc 100644
--- a/docs/dev/table/tableApi.md
+++ b/docs/dev/table/tableApi.md
@@ -506,7 +506,7 @@ Table result = left.join(right).where("a = d").select("a, b, e");
 
     <tr>
       <td>
-        <strong>Outer Joins</strong><br>
+        <strong>Outer Join</strong><br>
         <span class="label label-primary">Batch</span>
         <span class="label label-primary">Streaming</span>
         <span class="label label-info">Result Updating</span>
@@ -521,6 +521,7 @@ Table leftOuterResult = left.leftOuterJoin(right, "a = d").select("a, b, e");
 Table rightOuterResult = left.rightOuterJoin(right, "a = d").select("a, b, e");
 Table fullOuterResult = left.fullOuterJoin(right, "a = d").select("a, b, e");
 {% endhighlight %}
+<p><b>Note:</b> For streaming queries, the state required to compute the query result might grow infinitely, depending on the number of distinct input rows. Please provide a query configuration with a valid retention interval to prevent excessive state size. See <a href="streaming.html">Streaming Concepts</a> for details.</p>
       </td>
     </tr>
     <tr>
@@ -551,28 +552,28 @@ Table result = left.join(right)
     </tr>
     <tr>
     	<td>
-        <strong>TableFunction Inner Join</strong><br>
+        <strong>Inner Join with Table Function</strong><br>
         <span class="label label-primary">Batch</span> <span class="label label-primary">Streaming</span>
       </td>
     	<td>
        <p>Joins a table with the results of a table function. Each row of the left (outer) table is joined with all rows produced by the corresponding call of the table function. A row of the left (outer) table is dropped if its table function call returns an empty result.
         </p>
 {% highlight java %}
-// register function
+// register User-Defined Table Function
 TableFunction<String> split = new MySplitUDTF();
-tEnv.registerFunction("split", split);
+tableEnv.registerFunction("split", split);
 
 // join
 Table orders = tableEnv.scan("Orders");
 Table result = orders
-    .join(new Table(tEnv, "split(c)").as("s", "t", "v"))
+    .join(new Table(tableEnv, "split(c)").as("s", "t", "v"))
     .select("a, b, s, t, v");
 {% endhighlight %}
       </td>
     </tr>
     <tr>
     	<td>
-        <strong>TableFunction Left Outer Join</strong><br>
+        <strong>Left Outer Join with Table Function</strong><br>
         <span class="label label-primary">Batch</span> <span class="label label-primary">Streaming</span>
       </td>
       <td>
@@ -580,14 +581,14 @@ Table result = orders
         <p><b>Note:</b> Currently, the predicate of a table function left outer join can only be empty or literal <code>true</code>.</p>
         </p>
 {% highlight java %}
-// register function
+// register User-Defined Table Function
 TableFunction<String> split = new MySplitUDTF();
-tEnv.registerFunction("split", split);
+tableEnv.registerFunction("split", split);
 
 // join
 Table orders = tableEnv.scan("Orders");
 Table result = orders
-    .leftOuterJoin(new Table(tEnv, "split(c)").as("s", "t", "v"))
+    .leftOuterJoin(new Table(tableEnv, "split(c)").as("s", "t", "v"))
     .select("a, b, s, t, v");
 {% endhighlight %}
       </td>
@@ -612,6 +613,7 @@ Table result = orders
       <td>
         <strong>Inner Join</strong><br>
         <span class="label label-primary">Batch</span>
+        <span class="label label-primary">Streaming</span>
       </td>
       <td>
         <p>Similar to a SQL JOIN clause. Joins two tables. Both tables must have distinct field names and at least one equality join predicate must be defined through join operator or using a where or filter operator.</p>
@@ -620,12 +622,15 @@ val left = ds1.toTable(tableEnv, 'a, 'b, 'c)
 val right = ds2.toTable(tableEnv, 'd, 'e, 'f)
 val result = left.join(right).where('a === 'd).select('a, 'b, 'e)
 {% endhighlight %}
+<p><b>Note:</b> For streaming queries, the state required to compute the query result might grow infinitely, depending on the number of distinct input rows. Please provide a query configuration with a valid retention interval to prevent excessive state size. See <a href="streaming.html">Streaming Concepts</a> for details.</p>
       </td>
     </tr>
     <tr>
       <td>
-        <strong>Outer Joins</strong><br>
+        <strong>Outer Join</strong><br>
         <span class="label label-primary">Batch</span>
+        <span class="label label-primary">Streaming</span>
+        <span class="label label-info">Result Updating</span>
       </td>
       <td>
         <p>Similar to SQL LEFT/RIGHT/FULL OUTER JOIN clauses. Joins two tables. Both tables must have distinct field names and at least one equality join predicate must be defined.</p>
@@ -637,6 +642,7 @@ val leftOuterResult = left.leftOuterJoin(right, 'a === 'd).select('a, 'b, 'e)
 val rightOuterResult = left.rightOuterJoin(right, 'a === 'd).select('a, 'b, 'e)
 val fullOuterResult = left.fullOuterJoin(right, 'a === 'd).select('a, 'b, 'e)
 {% endhighlight %}
+<p><b>Note:</b> For streaming queries, the state required to compute the query result might grow infinitely, depending on the number of distinct input rows. Please provide a query configuration with a valid retention interval to prevent excessive state size. See <a href="streaming.html">Streaming Concepts</a> for details.</p>
       </td>
     </tr>
     <tr>
@@ -648,6 +654,7 @@ val fullOuterResult = left.fullOuterJoin(right, 'a === 'd).select('a, 'b, 'e)
         <p><b>Note:</b> Time-windowed joins are a subset of regular joins that can be processed in a streaming fashion.</p>
 
         <p>A time-windowed join requires at least one equi-join predicate and a join condition that bounds the time on both sides. Such a condition can be defined by two appropriate range predicates (<code>&lt;, &lt;=, &gt;=, &gt;</code>) or a single equality predicate that compares <a href="streaming.html#time-attributes">time attributes</a> of the same type (i.e., processing time or event time) of both input tables.</p> 
+
         <p>For example, the following predicates are valid window join conditions:</p>
 
         <ul>
@@ -667,14 +674,14 @@ val result = left.join(right)
     </tr>
     <tr>
     	<td>
-        <strong>TableFunction Inner Join</strong><br>
+        <strong>Inner Join with Table Function</strong><br>
         <span class="label label-primary">Batch</span> <span class="label label-primary">Streaming</span>
       </td>
     	<td>
        <p>Joins a table with the results of a table function. Each row of the left (outer) table is joined with all rows produced by the corresponding call of the table function. A row of the left (outer) table is dropped if its table function call returns an empty result.
         </p>
         {% highlight scala %}
-// instantiate function
+// instantiate User-Defined Table Function
 val split: TableFunction[_] = new MySplitUDTF()
 
 // join
@@ -686,14 +693,14 @@ val result: Table = table
     </tr>
     <tr>
     	<td>
-        <strong>TableFunction Left Outer Join</strong><br>
+        <strong>Left Outer Join with Table Function</strong><br>
         <span class="label label-primary">Batch</span> <span class="label label-primary">Streaming</span></td>
     	<td>
        <p>Joins a table with the results of a table function. Each row of the left (outer) table is joined with all rows produced by the corresponding call of the table function. If a table function call returns an empty result, the corresponding outer row is preserved and the result is padded with null values.
         <p><b>Note:</b> Currently, the predicate of a table function left outer join can only be empty or literal <code>true</code>.</p>
         </p>
 {% highlight scala %}
-// instantiate function
+// instantiate User-Defined Table Function
 val split: TableFunction[_] = new MySplitUDTF()
 
 // join
@@ -2744,7 +2751,7 @@ TEMPORAL.extract(TIMEINTERVALUNIT)
 {% endhighlight %}
       </td>
       <td>
-        <p>Extracts parts of a time point or time interval. Returns the part as a long value. E.g. <code>'2006-06-05'.toDate.extract(DAY)</code> leads to 5.</p>
+        <p>Extracts parts of a time point or time interval. Returns the part as a long value. E.g. <code>'2006-06-05'.toDate.extract(DAY)</code> leads to 5 or <code>'2006-06-05'.toDate.extract(QUARTER)</code> leads to 2.</p>
       </td>
     </tr>
 
@@ -2770,17 +2777,6 @@ TIMEPOINT.ceil(TIMEINTERVALUNIT)
       </td>
     </tr>
 
-    <tr>
-      <td>
-        {% highlight java %}
-DATE.quarter()
-{% endhighlight %}
-      </td>
-      <td>
-        <p>Returns the quarter of a year from a SQL date. E.g. <code>'1994-09-27'.toDate.quarter()</code> leads to 3.</p>
-      </td>
-    </tr>
-
     <tr>
       <td>
         {% highlight java %}
@@ -4261,7 +4257,7 @@ TEMPORAL.extract(TimeIntervalUnit)
 {% endhighlight %}
       </td>
       <td>
-        <p>Extracts parts of a time point or time interval. Returns the part as a long value. E.g. <code>"2006-06-05".toDate.extract(TimeIntervalUnit.DAY)</code> leads to 5.</p>
+        <p>Extracts parts of a time point or time interval. Returns the part as a long value. E.g. <code>"2006-06-05".toDate.extract(TimeIntervalUnit.DAY)</code> leads to 5 or <code>"2006-06-05".toDate.extract(TimeIntervalUnit.QUARTER)</code> leads to 2.</p>
       </td>
     </tr>
 
@@ -4287,17 +4283,6 @@ TIMEPOINT.ceil(TimeIntervalUnit)
       </td>
     </tr>
 
-    <tr>
-      <td>
-        {% highlight scala %}
-DATE.quarter()
-{% endhighlight %}
-      </td>
-      <td>
-        <p>Returns the quarter of a year from a SQL date. E.g. <code>"1994-09-27".toDate.quarter()</code> leads to 3.</p>
-      </td>
-    </tr>
-
     <tr>
       <td>
         {% highlight scala %}
diff --git a/docs/dev/table/udfs.md b/docs/dev/table/udfs.md
index 7dce8009777..20bf49d5999 100644
--- a/docs/dev/table/udfs.md
+++ b/docs/dev/table/udfs.md
@@ -141,7 +141,7 @@ Similar to a user-defined scalar function, a user-defined table function takes z
 
 In order to define a table function one has to extend the base class `TableFunction` in `org.apache.flink.table.functions` and implement (one or more) evaluation methods. The behavior of a table function is determined by its evaluation methods. An evaluation method must be declared `public` and named `eval`. The `TableFunction` can be overloaded by implementing multiple methods named `eval`. The parameter types of the evaluation methods determine all valid parameters of the table function. Evaluation methods can also support variable arguments, such as `eval(String... strs)`. The type of the returned table is determined by the generic type of `TableFunction`. Evaluation methods emit output rows using the protected `collect(T)` method.
 
-In the Table API, a table function is used with `.join(Expression)` or `.leftOuterJoin(Expression)` for Scala users and `.join(String)` or `.leftOuterJoin(String)` for Java users. The `join` operator (cross) joins each row from the outer table (table on the left of the operator) with all rows produced by the table-valued function (which is on the right side of the operator). The `leftOuterJoin` operator joins each row from the outer table (table on the left of the operator) with all rows produced by the table-valued function (which is on the right side of the operator) and preserves outer rows for which the table function returns an empty table. In SQL use `LATERAL TABLE(<TableFunction>)` with CROSS JOIN and LEFT JOIN with an ON TRUE join condition (see examples below).
+In the Table API, a table function is used with `.join(Table)` or `.leftOuterJoin(Table)`. The `join` operator (cross) joins each row from the outer table (table on the left of the operator) with all rows produced by the table-valued function (which is on the right side of the operator). The `leftOuterJoin` operator joins each row from the outer table (table on the left of the operator) with all rows produced by the table-valued function (which is on the right side of the operator) and preserves outer rows for which the table function returns an empty table. In SQL use `LATERAL TABLE(<TableFunction>)` with CROSS JOIN and LEFT JOIN with an ON TRUE join condition (see examples below).
 
 The following example shows how to define a table-valued function, register it in the TableEnvironment, and call it in a query. Note that you can configure your table function via a constructor before it is registered:
 
@@ -171,8 +171,10 @@ Table myTable = ...         // table schema: [a: String]
 tableEnv.registerFunction("split", new Split("#"));
 
 // Use the table function in the Java Table API. "as" specifies the field names of the table.
-myTable.join("split(a) as (word, length)").select("a, word, length");
-myTable.leftOuterJoin("split(a) as (word, length)").select("a, word, length");
+myTable.join(new Table(tableEnv, "split(a) as (word, length)"))
+    .select("a, word, length");
+myTable.leftOuterJoin(new Table(tableEnv, "split(a) as (word, length)"))
+    .select("a, word, length");
 
 // Use the table function in SQL with LATERAL and TABLE keywords.
 // CROSS JOIN a table function (equivalent to "join" in Table API).
diff --git a/docs/dev/types_serialization.md b/docs/dev/types_serialization.md
index a0f497b3911..9bace8cdbee 100644
--- a/docs/dev/types_serialization.md
+++ b/docs/dev/types_serialization.md
 as possible via reflection, using the few bits that Java preserves (mainly function signatures and subclass information).
 This logic also contains some simple type inference for cases where the return type of a function depends on its input type:
 
 {% highlight java %}
-public class AppendOne<T> extends MapFunction<T, Tuple2<T, Long>> {
+public class AppendOne<T> implements MapFunction<T, Tuple2<T, Long>> {
 
     public Tuple2<T, Long> map(T value) {
         return new Tuple2<T, Long>(value, 1L);
diff --git a/docs/fig/interval-join.svg b/docs/fig/interval-join.svg
new file mode 100644
index 00000000000..25b59324548
--- /dev/null
+++ b/docs/fig/interval-join.svg
@@ -0,0 +1,147 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+<svg width="1000px" height="350px" viewBox="0 0 1000 350" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="background: #FFFFFF;">
+    <!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
+    <title>Interval Join (Selected)</title>
+    <desc>Created with Sketch.</desc>
+    <defs></defs>
+    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+        <g id="Interval-Join-(Selected)">
+            <text id="time" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                <tspan x="882" y="262">time</tspan>
+            </text>
+            <g id="Group-3" transform="translate(116.000000, 229.000000)" stroke="#979797" stroke-linecap="square">
+                <path d="M0.5,5.5 L781.5,5.5" id="Line" stroke-width="2"></path>
+                <path id="Line-decoration-1" d="M781.5,5.5 L770.7,2.5 L770.7,8.5 L781.5,5.5 Z" stroke-width="2"></path>
+                <path d="M132.5,0.5 L132.5,10.5" id="Line-3"></path>
+                <path d="M31.5,0.5 L31.5,10.5" id="Line-3-Copy"></path>
+                <path d="M234.5,0.5 L234.5,10.5" id="Line-3-Copy"></path>
+                <path d="M334.5,0.5 L334.5,10.5" id="Line-3-Copy"></path>
+                <path d="M435.5,0.5 L435.5,10.5" id="Line-3-Copy"></path>
+                <path d="M536.5,0.5 L536.5,10.5" id="Line-3-Copy"></path>
+                <path d="M638.5,0.5 L638.5,10.5" id="Line-3-Copy"></path>
+                <path d="M739.5,0.5 L739.5,10.5" id="Line-3-Copy"></path>
+            </g>
+            <g id="Group-3-Copy" transform="translate(116.000000, 93.000000)" stroke="#979797" stroke-linecap="square">
+                <path d="M0.5,5.5 L781.5,5.5" id="Line" stroke-width="2"></path>
+                <path id="Line-decoration-1" d="M781.5,5.5 L770.7,2.5 L770.7,8.5 L781.5,5.5 Z" stroke-width="2"></path>
+                <path d="M132.5,0.5 L132.5,10.5" id="Line-3"></path>
+                <path d="M31.5,0.5 L31.5,10.5" id="Line-3-Copy"></path>
+                <path d="M234.5,0.5 L234.5,10.5" id="Line-3-Copy"></path>
+                <path d="M334.5,0.5 L334.5,10.5" id="Line-3-Copy"></path>
+                <path d="M435.5,0.5 L435.5,10.5" id="Line-3-Copy"></path>
+                <path d="M536.5,0.5 L536.5,10.5" id="Line-3-Copy"></path>
+                <path d="M638.5,0.5 L638.5,10.5" id="Line-3-Copy"></path>
+                <path d="M739.5,0.5 L739.5,10.5" id="Line-3-Copy"></path>
+            </g>
+            <g id="StreamRecord-Copy-7" transform="translate(740.000000, 85.000000)">
+                <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="6" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">6</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-7" transform="translate(842.000000, 86.000000)">
+                <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="7" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">7</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy" transform="translate(435.000000, 222.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="3" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">3</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-2" transform="translate(540.000000, 222.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="4" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">4</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-3" transform="translate(640.000000, 222.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="5" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">5</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-5" transform="translate(842.000000, 222.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="7" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">7</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-6" transform="translate(136.000000, 222.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="0" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">0</tspan>
+                </text>
+            </g>
+            <text id="0,0-0,1" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#9B9B9B">
+                <tspan x="131" y="276">0,0</tspan>
+                <tspan x="131" y="298">0,1</tspan>
+            </text>
+            <text id="2,0-2,1" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#34323B">
+                <tspan x="334" y="276">2,0</tspan>
+                <tspan x="334" y="298">2,1</tspan>
+            </text>
+            <text id="3,1" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#9B9B9B">
+                <tspan x="429" y="276">3,1</tspan>
+            </text>
+            <text id="5,6" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#9B9B9B">
+                <tspan x="634" y="276">5,6</tspan>
+            </text>
+            <text id="7,6-7,7" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#9B9B9B">
+                <tspan x="836" y="276">7,6</tspan>
+                <tspan x="836" y="298">7,7</tspan>
+            </text>
+            <polygon id="Path-3" fill-opacity="0.100000001" fill="#9B9B9B" points="-51 99.8380503 151.566279 235.968947 251.677716 99.8380503"></polygon>
+            <polygon id="Path-3" fill-opacity="0.100000001" fill="#9B9B9B" points="246 99.8380503 448.566279 235.968947 548.677716 99.8380503"></polygon>
+            <polygon id="Path-3" fill-opacity="0.100000001" fill="#9B9B9B" points="350 99.8380503 552.566279 235.968947 652.677716 99.8380503"></polygon>
+            <polygon id="Path-3" fill-opacity="0.100000001" fill="#9B9B9B" points="448 99.8380503 650.566279 235.968947 750.677716 99.8380503"></polygon>
+            <polygon id="Path-3" fill-opacity="0.100000001" fill="#9B9B9B" points="651 99.8380503 853.566279 235.968947 953.677716 99.8380503"></polygon>
+            <polygon id="Path-3" fill-opacity="0.5" fill="#F8E71C" points="148.284508 99.8380503 350.850787 235.968947 450.962224 99.8380503"></polygon>
+            <g id="StreamRecord-Copy-7" transform="translate(235.000000, 85.000000)">
+                <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="1" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">1</tspan>
+                </text>
+            </g>
+            <text id="lower-bound" transform="translate(246.800863, 159.474217) rotate(34.000000) translate(-246.800863, -159.474217) " font-family="AndaleMono, Andale Mono" font-size="14" font-weight="normal" fill="#34323B">
+                <tspan x="200.300863" y="164.474217">lower bound</tspan>
+            </text>
+            <text id="upper-bound" transform="translate(398.500000, 156.000000) rotate(-53.000000) translate(-398.500000, -156.000000) " font-family="AndaleMono, Andale Mono" font-size="14" font-weight="normal" fill="#34323B">
+                <tspan x="352" y="161">upper bound</tspan>
+            </text>
+            <g id="StreamRecord" transform="translate(338.000000, 222.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="2" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">2</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-6" transform="translate(135.000000, 86.000000)">
+                <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="0" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">0</tspan>
+                </text>
+            </g>
+        </g>
+    </g>
+</svg>
diff --git a/docs/fig/session-window-join.svg b/docs/fig/session-window-join.svg
new file mode 100644
index 00000000000..8fb3293edc9
--- /dev/null
+++ b/docs/fig/session-window-join.svg
@@ -0,0 +1,125 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+<svg width="1000px" height="350px" viewBox="0 0 1000 350" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="background: #FFFFFF;">
+    <!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
+    <title>Session Window Join</title>
+    <desc>Created with Sketch.</desc>
+    <defs>
+        <rect id="path-1" x="386" y="52" width="37" height="113"></rect>
+        <rect id="path-2" x="701" y="52" width="37" height="113"></rect>
+    </defs>
+    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+        <g id="Session-Window-Join">
+            <path d="M58.5,196.5 L941.5,196.5" id="Line" stroke="#979797" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M941.5,196.5 L930.7,193.5 L930.7,199.5 L941.5,196.5 Z" stroke="#979797" stroke-linecap="square"></path>
+            <text id="time" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                <tspan x="926" y="227">time</tspan>
+            </text>
+            <g id="StreamRecord" transform="translate(307.000000, 138.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="2" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">2</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-3" transform="translate(547.000000, 138.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="5" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">5</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-4" transform="translate(627.000000, 138.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="6" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">6</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-4" transform="translate(787.000000, 138.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="6" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">6</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-10" transform="translate(787.000000, 138.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="8" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">8</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-9" transform="translate(867.000000, 138.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="9" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">9</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-7" transform="translate(227.000000, 138.000000)">
+                <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="1" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">1</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy" transform="translate(547.000000, 68.000000)">
+                <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="5" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">5</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-2" transform="translate(467.000000, 68.000000)">
+                <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="4" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">4</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-6" transform="translate(148.000000, 68.000000)">
+                <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                <text id="0" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="8" y="18">0</tspan>
+                </text>
+            </g>
+            <polyline id="Path-2" stroke="#979797" points="133 163 133 50 361 50 361 163"></polyline>
+            <polyline id="Path-2" stroke="#979797" points="449 163 449 50 677 50 677 163"></polyline>
+            <polyline id="Path-2" stroke="#979797" points="758 163 758 50 926 50 926 163"></polyline>
+            <g id="Rectangle-3">
+                <use fill="#D8D8D8" fill-rule="evenodd" xlink:href="#path-1"></use>
+                <rect stroke="#979797" stroke-width="1" x="386.5" y="52.5" width="36" height="112"></rect>
+            </g>
+            <g id="Rectangle-3">
+                <use fill="#D8D8D8" fill-rule="evenodd" xlink:href="#path-2"></use>
+                <rect stroke="#979797" stroke-width="1" x="701.5" y="52.5" width="36" height="112"></rect>
+            </g>
+            <text id="GAP" transform="translate(406.000000, 108.500000) rotate(-90.000000) translate(-406.000000, -108.500000) " font-family="AndaleMono, Andale Mono" font-size="18" font-weight="normal" fill="#000000">
+                <tspan x="389.5" y="114.5">GAP</tspan>
+            </text>
+            <text id="GAP" transform="translate(719.000000, 108.500000) rotate(-90.000000) translate(-719.000000, -108.500000) " font-family="AndaleMono, Andale Mono" font-size="18" font-weight="normal" fill="#000000">
+                <tspan x="702.5" y="114.5">GAP</tspan>
+            </text>
+            <text id="1,0-2,0" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#34323B">
+                <tspan x="227" y="234">1,0</tspan>
+                <tspan x="227" y="256">2,0</tspan>
+            </text>
+            <text id="5,4-5,5-6,4-6,5" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#34323B">
+                <tspan x="545" y="230">5,4</tspan>
+                <tspan x="545" y="252">5,5</tspan>
+                <tspan x="545" y="274">6,4</tspan>
+                <tspan x="545" y="296">6,5</tspan>
+            </text>
+        </g>
+    </g>
+</svg>
diff --git a/docs/fig/sliding-window-join.svg b/docs/fig/sliding-window-join.svg
new file mode 100644
index 00000000000..f544e0de35f
--- /dev/null
+++ b/docs/fig/sliding-window-join.svg
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+<svg width="1000px" height="350px" viewBox="0 0 1000 350" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="background: #FFFFFF;">
+    <!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
+    <title>Sliding Window Join</title>
+    <desc>Created with Sketch.</desc>
+    <defs></defs>
+    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+        <g id="Sliding-Window-Join">
+            <path d="M101.5,215.5 L882.5,215.5" id="Line" stroke="#979797" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M882.5,215.5 L871.7,212.5 L871.7,218.5 L882.5,215.5 Z" stroke="#979797" stroke-linecap="square"></path>
+            <text id="time" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                <tspan x="867" y="242">time</tspan>
+            </text>
+            <path d="M544,53 L544,163.8 L650.44697,163.8 L650.44697,53 L544,53 Z" id="Rectangle-Copy" stroke="#BD10E0" stroke-width="2"></path>
+            <path d="M490,46 L490,170.9 L596.44697,170.9 L596.44697,46 L490,46 Z" id="Rectangle-Copy-2" stroke="#3F8FAE" stroke-width="2"></path>
+            <path d="M381,46 L381,170.9 L487.44697,170.9 L487.44697,46 L381,46 Z" id="Rectangle-Copy-3" stroke="#50E3C2" stroke-width="2"></path>
+            <path d="M327,53 L327,163.8 L433.44697,163.8 L433.44697,53 L327,53 Z" id="Rectangle-Copy-4" stroke="#F5A623" stroke-width="2"></path>
+            <path d="M435,39 L435,178 L541.44697,178 L541.44697,39 L435,39 Z" id="Rectangle-Copy-5" stroke="#417505" stroke-width="2"></path>
+            <path d="M273.98992,60 L273.010183,156.7 L379.44697,156.7 L379.44697,60 L273.98992,60 Z" id="Rectangle" stroke="#D0021B" stroke-width="2"></path>
+            <rect id="Rectangle-2" fill="#FFFFFF" x="257" y="70" width="409" height="78.96"></rect>
+            <g id="StreamRecord" transform="translate(463.000000, 124.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#F5A623" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="2" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">2</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy" transform="translate(510.000000, 124.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#F5A623" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="3" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">3</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-2" transform="translate(564.000000, 124.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#F5A623" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="4" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">4</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-7" transform="translate(401.000000, 124.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#F5A623" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="1" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">1</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy" transform="translate(507.000000, 75.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#7ED321" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="3" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">3</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-2" transform="translate(563.000000, 75.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#7ED321" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="4" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">4</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-6" transform="translate(340.000000, 124.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#F5A623" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="0" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">0</tspan>
+                </text>
+            </g>
+            <g id="StreamRecord-Copy-6" transform="translate(340.000000, 75.000000)">
+                <ellipse id="Oval" stroke="#979797" fill="#7ED321" cx="9.68276515" cy="9.5" rx="9.68276515" ry="9.5"></ellipse>
+                <text id="0" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                    <tspan x="6.1969697" y="15">0</tspan>
+                </text>
+            </g>
+            <text id="0,0" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#D0021B">
+                <tspan x="307" y="242">0,0</tspan>
+            </text>
+            <text id="0,0-1,0" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#F5A623">
+                <tspan x="362" y="242">0,0</tspan>
+                <tspan x="362" y="264">1,0</tspan>
+            </text>
+            <text id="2,3-3,3" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#417505">
+                <tspan x="470" y="242">2,3</tspan>
+                <tspan x="470" y="264">3,3</tspan>
+            </text>
+            <text id="3,3-3,4-4,3-4,4" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#3F8FAE">
+                <tspan x="524" y="242">3,3</tspan>
+                <tspan x="524" y="264">3,4</tspan>
+                <tspan x="524" y="286">4,3</tspan>
+                <tspan x="524" y="308">4,4</tspan>
+            </text>
+            <text id="4,4" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#9013FE">
+                <tspan x="579" y="242">4,4</tspan>
+            </text>
+            <text id="-" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#50E3C2">
+                <tspan x="428" y="242">-</tspan>
+            </text>
+        </g>
+    </g>
+</svg>
diff --git a/docs/fig/ssl_internal_external.svg b/docs/fig/ssl_internal_external.svg
new file mode 100755
index 00000000000..04262d29cbc
--- /dev/null
+++ b/docs/fig/ssl_internal_external.svg
@@ -0,0 +1,336 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="831.19"
+   height="364.59875"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.5 r10040">
+  <defs
+     id="defs4" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.35"
+     inkscape:cx="514.76354"
+     inkscape:cy="76.03094"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="1920"
+     inkscape:window-height="1178"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(139.76354,-243.79437)">
+    <g
+       id="g3138"
+       transform="translate(-199.38854,144.82812)">
+      <path
+         id="path3140"
+         d="m 649.32426,123.89336 c 0,-6.97673 5.66391,-12.67815 12.67816,-12.67815 l 148.1244,0 c 7.01425,0 12.67816,5.70142 12.67816,12.67815 l 0,50.75015 c 0,7.01425 -5.66391,12.67816 -12.67816,12.67816 l -148.1244,0 c -7.01425,0 -12.67816,-5.66391 -12.67816,-12.67816 z"
+         style="fill:#afabab;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3142"
+         style="font-size:22.5056076px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="144.70425"
+         x="711.15765"
+         xml:space="preserve">Task </text>
+      <text
+         id="text3144"
+         style="font-size:22.5056076px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="171.71098"
+         x="687.45178"
+         xml:space="preserve">Manager</text>
+      <path
+         id="path3146"
+         d="m 649.32426,256.45139 c 0,-7.01425 5.66391,-12.71567 12.67816,-12.71567 l 148.1244,0 c 7.01425,0 12.67816,5.70142 12.67816,12.71567 l 0,50.71263 c 0,7.01425 -5.66391,12.71567 -12.67816,12.71567 l -148.1244,0 c -7.01425,0 -12.67816,-5.70142 -12.67816,-12.71567 z"
+         style="fill:#afabab;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3148"
+         style="font-size:22.5056076px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="277.28275"
+         x="711.15765"
+         xml:space="preserve">Task </text>
+      <text
+         id="text3150"
+         style="font-size:22.5056076px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="304.28949"
+         x="687.45178"
+         xml:space="preserve">Manager</text>
+      <path
+         id="path3152"
+         d="m 649.32426,389.12194 c 0,-7.01425 5.66391,-12.67816 12.67816,-12.67816 l 148.1244,0 c 7.01425,0 12.67816,5.66391 12.67816,12.67816 l 0,50.75014 c 0,7.01425 -5.66391,12.67816 -12.67816,12.67816 l -148.1244,0 c -7.01425,0 -12.67816,-5.66391 -12.67816,-12.67816 z"
+         style="fill:#afabab;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3154"
+         style="font-size:22.5056076px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="409.86127"
+         x="711.15765"
+         xml:space="preserve">Task </text>
+      <text
+         id="text3156"
+         style="font-size:22.5056076px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="436.86801"
+         x="687.45178"
+         xml:space="preserve">Manager</text>
+      <path
+         id="path3158"
+         d="m 330.47608,331.77015 c 0,-6.15153 4.98874,-11.14027 11.14028,-11.14027 l 126.68781,0 c 6.15153,0 11.12152,4.98874 11.12152,11.14027 l 0,44.48608 c 0,6.13278 -4.96999,11.12153 -11.12152,11.12153 l -126.68781,0 c -6.15154,0 -11.14028,-4.98875 -11.14028,-11.12153 z"
+         style="fill:#afabab;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3160"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="349.87692"
+         x="359.48764"
+         xml:space="preserve">Resource</text>
+      <text
+         id="text3162"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="373.8829"
+         x="361.88824"
+         xml:space="preserve">Manager</text>
+      <path
+         id="path3164"
+         d="m 330.47608,184.84605 c 0,-6.13278 4.98874,-11.12152 11.14028,-11.12152 l 126.68781,0 c 6.15153,0 11.12152,4.98874 11.12152,11.12152 l 0,44.48608 c 0,6.15153 -4.96999,11.12152 -11.12152,11.12152 l -126.68781,0 c -6.15154,0 -11.14028,-4.96999 -11.14028,-11.12152 z"
+         style="fill:#afabab;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3166"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="202.97752"
+         x="388.14163"
+         xml:space="preserve">Job</text>
+      <text
+         id="text3168"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="226.98351"
+         x="361.8851"
+         xml:space="preserve">Manager</text>
+      <path
+         id="path3170"
+         d="m 315.00348,164.49723 173.48071,0 0,230.68247 -173.48071,0 z"
+         style="fill:none;stroke:#000000;stroke-width:1.25656307px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3172"
+         style="font-size:19.95497131px;font-style:italic;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="418.32153"
+         x="334.40732"
+         xml:space="preserve">Master Process</text>
+      <path
+         id="path3174"
+         d="m 730.41947,194.37342 0,39.87244 -1.87547,0 0,-39.87244 z m 4.27606,32.85819 -5.2138,8.88971 -5.17629,-8.88971 c -0.26256,-0.45011 -0.11253,-1.05026 0.33759,-1.31283 0.45011,-0.26256 1.01275,-0.075 1.27531,0.33759 l 4.3886,7.50187 -1.6129,0 4.38859,-7.50187 c 0.22506,-0.41261 0.8252,-0.60015 1.27532,-0.33759 0.45011,0.26257 0.60015,0.86272 0.33758,1.31283 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3176"
+         d="m 743.54774,196.21138 0,39.90994 -1.87547,0 0,-39.90994 z m -6.11403,7.05176 5.17629,-8.88972 5.2138,8.88972 c 0.26257,0.45011 0.11253,1.01275 -0.33758,1.27532 -0.45011,0.26256 -1.05026,0.11252 -1.27532,-0.33759 l -4.38859,-7.50187 1.6129,0 -4.38859,7.50187 c -0.26257,0.45011 -0.82521,0.60015 -1.27532,0.33759 -0.45011,-0.26257 -0.60015,-0.82521 -0.33759,-1.27532 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3178"
+         d="m 730.41947,326.89394 0,39.87243 -1.87547,0 0,-39.87243 z m 4.27606,32.85818 -5.2138,8.88972 -5.17629,-8.88972 c -0.26256,-0.45011 -0.11253,-1.01275 0.33759,-1.27531 0.45011,-0.26257 1.01275,-0.11253 1.27531,0.33758 l 4.3886,7.50187 -1.6129,0 4.38859,-7.50187 c 0.22506,-0.45011 0.8252,-0.60015 1.27532,-0.33758 0.45011,0.26256 0.60015,0.8252 0.33758,1.27531 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3180"
+         d="m 743.54774,328.7694 0,39.87244 -1.87547,0 0,-39.87244 z m -6.11403,7.01425 5.17629,-8.88971 5.2138,8.88971 c 0.26257,0.45011 0.11253,1.01275 -0.33758,1.27532 -0.45011,0.26257 -1.05026,0.11253 -1.27532,-0.33758 l -4.38859,-7.50187 1.6129,0 -4.38859,7.50187 c -0.26257,0.45011 -0.82521,0.60015 -1.27532,0.33758 -0.45011,-0.26257 -0.60015,-0.82521 -0.33759,-1.27532 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3182"
+         d="m 614.32804,151.98786 -103.30073,62.04046 -0.93773,-1.6129 103.26322,-62.04046 z m -95.08618,62.07797 -10.27756,0.11253 4.95123,-9.03975 c 0.26256,-0.45012 0.8252,-0.60015 1.27532,-0.33759 0.45011,0.22506 0.60015,0.7877 0.37509,1.27532 l 0,0 -4.20105,7.6144 -0.8252,-1.38785 8.70217,-0.11253 c 0.48762,-0.0375 0.93773,0.3751 0.93773,0.90023 0,0.52513 -0.4126,0.93773 -0.93773,0.97524 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3184"
+         d="m 620.74214,166.24141 -103.30073,62.07797 -0.93774,-1.6129 103.26323,-62.04046 z m -9.18979,-1.6129 10.31507,-0.15003 -4.95123,9.03975 c -0.26257,0.45011 -0.82521,0.63766 -1.27532,0.37509 -0.45011,-0.26256 -0.63766,-0.8252 -0.37509,-1.27532 l 4.16354,-7.61439 0.8252,1.38784 -8.66466,0.11253 c -0.52513,0 -0.93773,-0.4126 -0.93773,-0.93773 -0.0375,-0.48762 0.37509,-0.93774 0.90022,-0.93774 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3186"
+         d="m 629.29427,273.29308 -120.48001,0 0,-1.87546 120.48001,0 z m -113.46576,4.23856 -8.88972,-5.17629 8.88972,-5.17629 c 0.45011,-0.26256 1.01275,-0.11253 1.27531,0.33759 0.26257,0.45011 0.11253,1.01275 -0.33758,1.27531 l 0,0 -7.50187,4.3886 0,-1.65042 7.50187,4.3886 c 0.45011,0.26256 0.60015,0.8252 0.33758,1.27532 -0.26256,0.45011 -0.8252,0.60015 -1.27531,0.33758 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3188"
+         d="m 627.41881,288.93448 -120.48002,0 0,-1.87547 120.48002,0 z m -7.01425,-6.15153 8.88971,5.2138 -8.88971,5.17629 c -0.45011,0.26256 -1.01275,0.11252 -1.27532,-0.33759 -0.26256,-0.45011 -0.11253,-1.01275 0.33758,-1.27532 l 7.50187,-4.38859 0,1.6129 -7.50187,-4.35108 c -0.45011,-0.26257 -0.60014,-0.86272 -0.33758,-1.31283 0.26257,-0.45011 0.82521,-0.60015 1.27532,-0.33758 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3190"
+         d="m 620.25452,393.28548 -104.351,-60.24001 0.93774,-1.6129 104.35099,60.24001 z m -100.375,-53.03822 -5.13879,-8.92722 10.31507,-0.075 c 0.52514,0 0.93774,0.4126 0.93774,0.93774 0,0.52513 -0.4126,0.93773 -0.93774,0.93773 l 0,0 -8.66465,0.0375 0.78769,-1.38785 4.31358,7.53938 c 0.26256,0.45011 0.11252,1.01275 -0.33759,1.27532 -0.45011,0.26256 -1.05026,0.11253 -1.27531,-0.33759 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3192"
+         d="m 610.87718,405.8136 -104.35099,-60.24001 0.93773,-1.6129 104.351,60.24001 z m -3.03825,-8.8147 5.10127,8.96474 -10.27756,0.0375 c -0.52513,0 -0.93774,-0.41261 -0.93774,-0.93774 0,-0.52513 0.41261,-0.93773 0.93774,-0.93773 l 8.66466,-0.0375 -0.7877,1.38784 -4.31357,-7.53937 c -0.26257,-0.45012 -0.11253,-1.01276 0.33758,-1.27532 0.45011,-0.26257 1.01275,-0.11253 1.27532,0.33758 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3194"
+         style="font-size:19.95497131px;font-style:italic;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="254.02786"
+         x="506.90329"
+         xml:space="preserve">RPC / BLOB</text>
+      <text
+         id="text3196"
+         style="font-size:19.95497131px;font-style:italic;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="222.93466"
+         x="772.27954"
+         xml:space="preserve">Data Plane</text>
+      <text
+         id="text3198"
+         style="font-size:19.95497131px;font-style:italic;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="355.45471"
+         x="772.27954"
+         xml:space="preserve">Data Plane</text>
+      <path
+         id="path3200"
+         d="m 300.93747,462.86531 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02626 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02626 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02626 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02626 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 
z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-5.02625 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02626 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75093 1.27532,0 0,3.75093 -1.27532,0 z m 0,-4.98874 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-5.02625 0,-3.75094 1.27532,0 0,3.75094 -1.27532,0 z m 0,-4.98875 0,-3.75093 1.27532,0 0,1.23781 -0.63766,0 0.63766,-0.600152 0,3.113272 -1.27532,0 z m 2.51313,-3.75093 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 
0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02626,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75094,0 0,1.23781 -3.75094,0 0,-1.23781 z m 4.98874,0 3.75094,0 0,1.23781 -3.75094,0 
0,-1.23781 z m 4.98875,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 5.02625,0 3.75093,0 0,1.23781 -3.75093,0 0,-1.23781 z m 4.98874,0 2.17554,0 0,2.8132 -1.2378,0 0,-2.175542 0.63765,0.600152 -1.57539,0 0,-1.23781 z m 2.17554,4.05101 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02626 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02626 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02626 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02626 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02626 0,3.75093 -1.2378,0 
0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m 0,4.98875 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,5.02625 0,3.75093 -1.2378,0 0,-3.75093 1.2378,0 z m 0,4.98874 0,3.75094 -1.2378,0 0,-3.75094 1.2378,0 z m -0.93773,5.32633 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 
-3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98875,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -5.02625,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02626,0 -3.75093,0 
0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75093,0 0,-1.27532 3.75093,0 0,1.27532 z m -4.98874,0 -3.75094,0 0,-1.27532 3.75094,0 0,1.27532 z m -5.02625,0 -3.11328,0 0,-1.27532 3.11328,0 0,1.27532 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.03750934px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3202"
+         d="m 293.13553,229.20085 0,101.91289 29.68864,0 0,-101.91289 -29.68864,0 z"
+         style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3204"
+         d="m 293.13553,229.20085 29.68864,0 0,101.91289 -29.68864,0 z"
+         style="fill:none;stroke:#000000;stroke-width:1.25656307px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3206"
+         style="font-size:22.5056076px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Calibri"
+         y="302.33319"
+         x="315.52737"
+         xml:space="preserve">REST</text>
+      <text
+         id="text3208"
+         style="font-size:19.95497131px;font-style:italic;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="130.10928"
+         x="316.13495"
+         xml:space="preserve">Internal</text>
+      <text
+         id="text3210"
+         style="font-size:19.95497131px;font-style:italic;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="130.10928"
+         x="200.44704"
+         xml:space="preserve">External</text>
+      <path
+         id="path3212"
+         d="m 60.258762,197.50545 c 0,-4.50112 3.647784,-8.15828 8.148905,-8.15828 l 119.664183,0 c 4.5105,0 8.15829,3.65716 8.15829,8.15828 l 0,32.61438 c 0,4.50112 -3.64779,8.1489 -8.15829,8.1489 l -119.664183,0 c -4.501121,0 -8.148905,-3.64778 -8.148905,-8.1489 z"
+         style="fill:none;stroke:#7f7f7f;stroke-width:1.24718571px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3214"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="221.69411"
+         x="87.575058"
+         xml:space="preserve">Browser</text>
+      <path
+         id="path3216"
+         d="m 60.258762,260.51178 c 0,-4.5105 3.657161,-8.17704 8.177037,-8.17704 l 119.617301,0 c 4.5105,0 8.17704,3.66654 8.17704,8.17704 l 0,32.71752 c 0,4.51988 -3.66654,8.17704 -8.17704,8.17704 l -119.617301,0 c -4.519876,0 -8.177037,-3.65716 -8.177037,-8.17704 z"
+         style="fill:none;stroke:#7f7f7f;stroke-width:1.24718571px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3218"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="284.72891"
+         x="111.5811"
+         xml:space="preserve">CLI</text>
+      <path
+         id="path3220"
+         d="m 60.258762,323.49934 c 0,-4.51987 3.657161,-8.17704 8.177037,-8.17704 l 119.617301,0 c 4.51987,0 8.17704,3.65717 8.17704,8.17704 l 0,32.7269 c 0,4.51988 -3.65717,8.17704 -8.17704,8.17704 l -119.617301,0 c -4.519876,0 -8.177037,-3.65716 -8.177037,-8.17704 z"
+         style="fill:none;stroke:#7f7f7f;stroke-width:1.25656307px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3222"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="347.7637"
+         x="103.17585"
+         xml:space="preserve">Tools</text>
+      <path
+         id="path3224"
+         d="m 273.97763,266.09129 -59.94931,0 0,-1.87547 59.94931,0 z m -52.92569,4.24793 -8.88971,-5.18566 8.88971,-5.18567 c 0.45012,-0.26257 1.02213,-0.11253 1.2847,0.33758 0.26257,0.44074 0.11253,1.02213 -0.33758,1.2847 l -7.50187,4.36984 0,-1.62228 7.50187,4.37921 c 0.45011,0.26257 0.60015,0.83459 0.33758,1.2847 -0.26257,0.45011 -0.83458,0.60015 -1.2847,0.33758 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.00937734px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <path
+         id="path3226"
+         d="m 272.13029,295.94873 -59.95868,0 0,-1.87547 59.95868,0 z m -7.033,-6.13278 8.88972,5.19504 -8.88972,5.17629 c -0.45011,0.26257 -1.0315,0.11253 -1.29407,-0.33758 -0.24381,-0.45011 -0.0938,-1.01275 0.33758,-1.27532 l 7.50187,-4.36984 0,1.61291 -7.50187,-4.36984 c -0.43135,-0.26257 -0.58139,-0.84396 -0.33758,-1.29408 0.26257,-0.45011 0.84396,-0.60014 1.29407,-0.33758 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875467px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3228"
+         style="font-size:19.95497131px;font-style:italic;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="288.96088"
+         x="218.86954"
+         xml:space="preserve">HTTP</text>
+      <path
+         id="path3230"
+         d="m 330.47608,258.3081 c 0,-6.15153 4.98874,-11.12152 11.14028,-11.12152 l 126.68781,0 c 6.15153,0 11.12152,4.96999 11.12152,11.12152 l 0,44.48608 c 0,6.15154 -4.96999,11.12152 -11.12152,11.12152 l -126.68781,0 c -6.15154,0 -11.14028,-4.96998 -11.14028,-11.12152 z"
+         style="fill:#afabab;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         inkscape:connector-curvature="0" />
+      <text
+         id="text3232"
+         style="font-size:19.95497131px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Verdana"
+         y="288.43024"
+         x="351.6857"
+         xml:space="preserve">Dispatcher</text>
+    </g>
+  </g>
+</svg>
diff --git a/docs/fig/tumbling-window-join.svg b/docs/fig/tumbling-window-join.svg
new file mode 100644
index 00000000000..9efb58c3173
--- /dev/null
+++ b/docs/fig/tumbling-window-join.svg
@@ -0,0 +1,131 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+<svg width="1000px" height="350px" viewBox="0 0 1000 350" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="background: #FFFFFF;">
+    <!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
+    <title>Tumbling Window Join</title>
+    <desc>Created with Sketch.</desc>
+    <defs></defs>
+    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+        <g id="Tumbling-Window-Join">
+            <text id="time" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                <tspan x="859" y="210">time</tspan>
+            </text>
+            <g id="Group-3" transform="translate(199.000000, 48.000000)">
+                <g id="Group-2" transform="translate(17.000000, 88.000000)">
+                    <g id="StreamRecord" transform="translate(159.714286, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="2" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">2</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy" transform="translate(239.571429, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="3" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">3</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-2" transform="translate(319.428571, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="4" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">4</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-3" transform="translate(399.285714, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="5" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">5</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-4" transform="translate(479.142857, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="6" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">6</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-5" transform="translate(559.000000, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="7" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">7</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-6">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="0" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">0</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-7" transform="translate(79.857143, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#F5A623" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="1" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">1</tspan>
+                        </text>
+                    </g>
+                </g>
+                <g id="Group" transform="translate(17.000000, 18.000000)">
+                    <g id="StreamRecord-Copy" transform="translate(239.571429, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="3" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">3</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-2" transform="translate(319.428571, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="4" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">4</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-6">
+                        <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="0" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">0</tspan>
+                        </text>
+                    </g>
+                    <g id="StreamRecord-Copy-7" transform="translate(79.857143, 0.000000)">
+                        <circle id="Oval" stroke="#979797" fill="#7ED321" cx="12.5" cy="12.5" r="12.5"></circle>
+                        <text id="1" font-family="Roboto-Regular, Roboto" font-size="16" font-weight="normal" fill="#34323B">
+                            <tspan x="8" y="18">1</tspan>
+                        </text>
+                    </g>
+                </g>
+                <polyline id="Path-2" stroke="#979797" points="0 113 0 0 136 0 136 113"></polyline>
+                <polyline id="Path-2-Copy" stroke="#979797" points="161 113 161 0 297 0 297 113"></polyline>
+                <polyline id="Path-2-Copy-2" stroke="#979797" points="325 113 325 0 461 0 461 113"></polyline>
+                <polyline id="Path-2-Copy-3" stroke="#979797" points="483 113 483 0 619 0 619 113"></polyline>
+            </g>
+            <text id="0,0-0,1-1,0-1,1" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#34323B">
+                <tspan x="238" y="232">0,0</tspan>
+                <tspan x="238" y="254">0,1</tspan>
+                <tspan x="238" y="276">1,0</tspan>
+                <tspan x="238" y="298">1,1</tspan>
+            </text>
+            <text id="2,3-3,3" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#34323B">
+                <tspan x="402" y="232">2,3</tspan>
+                <tspan x="402" y="254">3,3</tspan>
+            </text>
+            <text id="4,4-5,4" font-family="AndaleMono, Andale Mono" font-size="20" font-weight="normal" fill="#34323B">
+                <tspan x="559" y="232">4,4</tspan>
+                <tspan x="559" y="254">5,4</tspan>
+            </text>
+            <path d="M93.5,180.5 L874.5,180.5" id="Line" stroke="#979797" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M874.5,180.5 L863.7,177.5 L863.7,183.5 L874.5,180.5 Z" stroke="#979797" stroke-linecap="square"></path>
+        </g>
+    </g>
+</svg>
diff --git a/docs/monitoring/metrics.md b/docs/monitoring/metrics.md
index 55f626ed016..50511dc4d50 100644
--- a/docs/monitoring/metrics.md
+++ b/docs/monitoring/metrics.md
@@ -1243,6 +1243,27 @@ Thus, in order to infer the metric identifier:
       <td>The number of bytes this task reads from a remote source per second.</td>
       <td>Meter</td>
     </tr>
+    <tr>
+      <th rowspan="6"><strong>Task</strong></th>
+      <td>numBuffersInLocal</td>
+      <td>The total number of network buffers this task has read from a local source.</td>
+      <td>Counter</td>
+    </tr>
+    <tr>
+      <td>numBuffersInLocalPerSecond</td>
+      <td>The number of network buffers this task reads from a local source per second.</td>
+      <td>Meter</td>
+    </tr>
+    <tr>
+      <td>numBuffersInRemote</td>
+      <td>The total number of network buffers this task has read from a remote source.</td>
+      <td>Counter</td>
+    </tr>
+    <tr>
+      <td>numBuffersInRemotePerSecond</td>
+      <td>The number of network buffers this task reads from a remote source per second.</td>
+      <td>Meter</td>
+    </tr>
     <tr>
       <td>numBytesOut</td>
       <td>The total number of bytes this task has emitted.</td>
@@ -1253,6 +1274,16 @@ Thus, in order to infer the metric identifier:
       <td>The number of bytes this task emits per second.</td>
       <td>Meter</td>
     </tr>
+    <tr>
+      <td>numBuffersOut</td>
+      <td>The total number of network buffers this task has emitted.</td>
+      <td>Counter</td>
+    </tr>
+    <tr>
+      <td>numBuffersOutPerSecond</td>
+      <td>The number of network buffers this task emits per second.</td>
+      <td>Meter</td>
+    </tr>
     <tr>
       <th rowspan="6"><strong>Task/Operator</strong></th>
       <td>numRecordsIn</td>
@@ -1393,13 +1424,80 @@ Thus, in order to infer the metric identifier:
       </td>
       <td>Gauge</td>
     </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>sleepTimeMillis</td>
+      <td>stream, shardId</td>
+      <td>The number of milliseconds the consumer spends sleeping before fetching records from Kinesis.
+      A particular shard's metric can be specified by stream name and shard id.
+      </td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>maxNumberOfRecordsPerFetch</td>
+      <td>stream, shardId</td>
+      <td>The maximum number of records requested by the consumer in a single getRecords call to Kinesis. If ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS
+      is set to true, this value is calculated adaptively to make full use of the 2 Mbps per-shard read limit of Kinesis (see the configuration sketch below the table).
+      </td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>numberOfAggregatedRecordsPerFetch</td>
+      <td>stream, shardId</td>
+      <td>The number of aggregated Kinesis records fetched by the consumer in a single getRecords call to Kinesis.
+      </td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>numberOfDeggregatedRecordsPerFetch</td>
+      <td>stream, shardId</td>
+      <td>The number of deaggregated Kinesis records fetched by the consumer in a single getRecords call to Kinesis.
+      </td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>averageRecordSizeBytes</td>
+      <td>stream, shardId</td>
+      <td>The average size of a Kinesis record in bytes, fetched by the consumer in a single getRecords call.
+      </td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>runLoopTimeNanos</td>
+      <td>stream, shardId</td>
+      <td>The actual time taken, in nanoseconds, by the consumer in the run loop.
+      </td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>loopFrequencyHz</td>
+      <td>stream, shardId</td>
+      <td>The number of calls to getRecords in one second. 
+      </td>
+      <td>Gauge</td>
+    </tr>
+    <tr>
+      <th rowspan="1">Operator</th>
+      <td>bytesRequestedPerFetch</td>
+      <td>stream, shardId</td>
+      <td>The bytes requested (2 MB/sec / loopFrequencyHz) in a single call to getRecords.
+      </td>
+      <td>Gauge</td>
+    </tr>
   </tbody>
 </table>
 
 ## Latency tracking
 
-Flink allows to track the latency of records traveling through the system. To enable the latency tracking
-a `latencyTrackingInterval` (in milliseconds) has to be set to a positive value in the `ExecutionConfig`.
+Flink allows tracking the latency of records traveling through the system. This feature is disabled by default.
+To enable latency tracking, you must set the `latencyTrackingInterval` to a positive number in either the
+[Flink configuration]({{ site.baseurl }}/ops/config.html#metrics-latency-interval) or `ExecutionConfig`.
 
 At the `latencyTrackingInterval`, the sources will periodically emit a special record, called a `LatencyMarker`.
 The marker contains a timestamp from the time when the record has been emitted at the sources.
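
As a minimal sketch of the programmatic route mentioned above (assuming a standard streaming job; the 1000 ms interval is only an illustration), the tracking interval can be set on the `ExecutionConfig` obtained from the execution environment:

{% highlight java %}
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

// Emit a LatencyMarker from each source roughly once per second;
// non-positive values leave latency tracking disabled.
env.getConfig().setLatencyTrackingInterval(1000L);
{% endhighlight %}
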
@@ -1419,6 +1517,9 @@ latency issues caused by individual machines.
 Currently, Flink assumes that the clocks of all machines in the cluster are in sync. We recommend setting
 up an automated clock synchronisation service (like NTP) to avoid false latency results.
 
+<span class="label label-danger">Warning</span> Enabling latency metrics can significantly impact the performance
+of the cluster. It is highly recommended to only use them for debugging purposes.
+
 ## REST API integration
 
 Metrics can be queried through the [Monitoring REST API]({{ site.baseurl }}/monitoring/rest_api.html).
diff --git a/docs/ops/cli.md b/docs/ops/cli.md
index 6974a2d211e..9af5c5b5333 100644
--- a/docs/ops/cli.md
+++ b/docs/ops/cli.md
@@ -257,6 +257,11 @@ Action "run" compiles and runs a program.
      -s,--fromSavepoint <savepointPath>   Path to a savepoint to restore the job
                                           from (for example
                                           hdfs:///flink/savepoint-1537).
+     -sae,--shutdownOnAttachedExit        If the job is submitted in attached
+                                          mode, perform a best-effort cluster
+                                          shutdown when the CLI is terminated
+                                          abruptly, e.g., in response to a user
+                                          interrupt, such as typing Ctrl + C.
   Options for yarn-cluster mode:
      -d,--detached                        If present, runs the job in detached
                                           mode
@@ -265,6 +270,11 @@ Action "run" compiles and runs a program.
                                           connect to a different JobManager than
                                           the one specified in the
                                           configuration.
+     -sae,--shutdownOnAttachedExit        If the job is submitted in attached
+                                          mode, perform a best-effort cluster
+                                          shutdown when the CLI is terminated
+                                          abruptly, e.g., in response to a user
+                                          interrupt, such as typing Ctrl + C.
      -yD <property=value>                 use value for given property
      -yd,--yarndetached                   If present, runs the job in detached
                                           mode (deprecated; use non-YARN
@@ -272,8 +282,8 @@ Action "run" compiles and runs a program.
      -yh,--yarnhelp                       Help for the Yarn session CLI.
      -yid,--yarnapplicationId <arg>       Attach to running YARN session
      -yj,--yarnjar <arg>                  Path to Flink jar file
-     -yjm,--yarnjobManagerMemory <arg>    Memory for JobManager Container [in
-                                          MB]
+     -yjm,--yarnjobManagerMemory <arg>    Memory for JobManager Container
+                                          with optional unit (default: MB)
      -yn,--yarncontainer <arg>            Number of YARN container to allocate
                                           (=Number of Task Managers)
      -ynm,--yarnname <arg>                Set a custom name for the application
@@ -285,8 +295,8 @@ Action "run" compiles and runs a program.
      -yst,--yarnstreaming                 Start Flink in streaming mode
      -yt,--yarnship <arg>                 Ship files in the specified directory
                                           (t for transfer)
-     -ytm,--yarntaskManagerMemory <arg>   Memory per TaskManager Container [in
-                                          MB]
+     -ytm,--yarntaskManagerMemory <arg>   Memory per TaskManager Container
+                                          with optional unit (default: MB)
      -yz,--yarnzookeeperNamespace <arg>   Namespace to create the Zookeeper
                                           sub-paths for high availability mode
      -ynl,--yarnnodeLabel <arg>           Specify YARN node label for 
diff --git a/docs/ops/config.md b/docs/ops/config.md
index 1e6be19e724..fd0df0c2370 100644
--- a/docs/ops/config.md
+++ b/docs/ops/config.md
@@ -170,12 +170,6 @@ You have to configure `jobmanager.archive.fs.dir` in order to archive terminated
 
 {% include generated/history_server_configuration.html %}
 
-### Slot Manager
-
-The configuration keys in this section are relevant for the SlotManager running in the ResourceManager
-
-{% include generated/slot_manager_configuration.html %}
-
 ## Legacy
 
 - `mode`: Execution mode of Flink. Possible values are `legacy` and `new`. In order to start the legacy components, you have to specify `legacy` (DEFAULT: `new`).
diff --git a/docs/ops/deployment/cluster_setup.md b/docs/ops/deployment/cluster_setup.md
index 75d0c2efff2..bb59f165e27 100644
--- a/docs/ops/deployment/cluster_setup.md
+++ b/docs/ops/deployment/cluster_setup.md
@@ -137,7 +137,7 @@ You can add both JobManager and TaskManager instances to your running cluster wi
 #### Adding a JobManager
 
 {% highlight bash %}
-bin/jobmanager.sh ((start|start-foreground) cluster)|stop|stop-all
+bin/jobmanager.sh ((start|start-foreground) [host] [webui-port])|stop|stop-all
 {% endhighlight %}
 
 #### Adding a TaskManager
diff --git a/docs/ops/deployment/docker.md b/docs/ops/deployment/docker.md
index 4986f2ae7aa..453693d391e 100644
--- a/docs/ops/deployment/docker.md
+++ b/docs/ops/deployment/docker.md
@@ -23,20 +23,24 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-[Docker](https://www.docker.com) is a popular container runtime. There are
-official Docker images for Apache Flink available on Docker Hub which can be
-used directly or extended to better integrate into a production environment.
+[Docker](https://www.docker.com) is a popular container runtime. 
+There are Docker images for Apache Flink available on Docker Hub which can be used to deploy a session cluster.
+The Flink repository also contains tooling to create container images to deploy a job cluster.
 
 * This will be replaced by the TOC
 {:toc}
 
-## Official Docker Images
+## Flink session cluster
+
+A Flink session cluster can be used to run multiple jobs. 
+Each job needs to be submitted to the cluster after it has been deployed. 
+
+### Docker images
 
 The [official Docker repository](https://hub.docker.com/_/flink/) is
 hosted on Docker Hub and serves images of Flink version 1.2.1 and later.
 
-Images for each supported combination of Hadoop and Scala are available, and
-tag aliases are provided for convenience.
+Images for each supported combination of Hadoop and Scala are available, and tag aliases are provided for convenience.
 
 For example, the following aliases can be used: *(`1.2.y` indicates the latest
 release of Flink 1.2)*
@@ -63,13 +67,25 @@ For example:
 **Note:** The docker images are provided as a community project by individuals
 on a best-effort basis. They are not official releases by the Apache Flink PMC.
 
+## Flink job cluster
+
+A Flink job cluster is a dedicated cluster which runs a single job. 
+The job is part of the image and, thus, there is no extra job submission needed. 
+
+### Docker images
+
+The Flink job cluster image needs to contain the user code jars of the job for which the cluster is started.
+Therefore, one needs to build a dedicated container image for every job.
+The `flink-container` module contains a `build.sh` script which can be used to create such an image.
+Please see the [instructions](https://github.com/apache/flink/blob/{{ site.github_branch }}/flink-container/docker/README.md) for more details. 
+
 ## Flink with Docker Compose
 
 [Docker Compose](https://docs.docker.com/compose/) is a convenient way to run a
 group of Docker containers locally.
 
-An [example config file](https://github.com/docker-flink/examples/blob/master/docker-compose.yml)
-is available on GitHub.
+Example config files for a [session cluster](https://github.com/docker-flink/examples/blob/master/docker-compose.yml) and a [job cluster](https://github.com/apache/flink/blob/{{ site.github_branch }}/flink-container/docker/docker-compose.yml)
+are available on GitHub.
 
 ### Usage
 
@@ -85,10 +101,14 @@ is available on GitHub.
 
         docker-compose scale taskmanager=<N>
 
-When the cluster is running, you can visit the web UI at [http://localhost:8081
-](http://localhost:8081) and submit a job.
+* Kill the cluster
+
+        docker-compose kill
+
+When the cluster is running, you can visit the web UI at [http://localhost:8081](http://localhost:8081). 
+You can also use the web UI to submit a job to a session cluster.
 
-To submit a job via the command line, you must copy the JAR to the Jobmanager
+To submit a job to a session cluster via the command line, you must copy the JAR to the JobManager
 container and submit the job from there.
 
 For example:
diff --git a/docs/ops/deployment/kubernetes.md b/docs/ops/deployment/kubernetes.md
index 37489fe948f..5244f5ed544 100644
--- a/docs/ops/deployment/kubernetes.md
+++ b/docs/ops/deployment/kubernetes.md
@@ -23,51 +23,77 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-[Kubernetes](https://kubernetes.io) is a container orchestration system.
+This page describes how to deploy a Flink job and session cluster on [Kubernetes](https://kubernetes.io).
 
 * This will be replaced by the TOC
 {:toc}
 
-## Simple Kubernetes Flink Cluster
+## Setup Kubernetes
 
-A basic Flink cluster deployment in Kubernetes has three components:
+Please follow [Kubernetes' setup guide](https://kubernetes.io/docs/setup/) in order to deploy a Kubernetes cluster.
+If you want to run Kubernetes locally, we recommend using [MiniKube](https://kubernetes.io/docs/setup/minikube/).
 
-* a Deployment for a single Jobmanager
-* a Deployment for a pool of Taskmanagers
-* a Service exposing the Jobmanager's RPC and UI ports
+<div class="alert alert-info" markdown="span">
+  <strong>Note:</strong> If using MiniKube, please make sure to execute `minikube ssh 'sudo ip link set docker0 promisc on'` before deploying a Flink cluster. 
+  Otherwise Flink components are not able to reference themselves through a Kubernetes service. 
+</div>
 
-### Launching the cluster
+## Flink session cluster on Kubernetes
 
-Using the [resource definitions found below](#simple-kubernetes-flink-cluster-
-resources), launch the cluster with the `kubectl` command:
+A Flink session cluster is executed as a long-running Kubernetes Deployment. 
+Note that you can run multiple Flink jobs on a session cluster.
+Each job needs to be submitted to the cluster after the cluster has been deployed.
+
+A basic Flink session cluster deployment in Kubernetes has three components:
+
+* a Deployment/Job which runs the JobManager
+* a Deployment for a pool of TaskManagers
+* a Service exposing the JobManager's REST and UI ports
+
+### Deploy Flink session cluster on Kubernetes
+
+Using the resource definitions for a [session cluster](#session-cluster-resource-definitions), launch the cluster with the `kubectl` command:
 
-    kubectl create -f jobmanager-deployment.yaml
     kubectl create -f jobmanager-service.yaml
+    kubectl create -f jobmanager-deployment.yaml
     kubectl create -f taskmanager-deployment.yaml
 
 You can then access the Flink UI via `kubectl proxy`:
 
 1. Run `kubectl proxy` in a terminal
-2. Navigate to [http://localhost:8001/api/v1/proxy/namespaces/default/services/flink-jobmanager:8081
-](http://localhost:8001/api/v1/proxy/namespaces/default/services/flink-
-jobmanager:8081) in your browser
+2. Navigate to [http://localhost:8001/api/v1/namespaces/default/services/flink-jobmanager:ui/proxy](http://localhost:8001/api/v1/namespaces/default/services/flink-jobmanager:ui/proxy) in your browser
 
-### Deleting the cluster
-
-Again, use `kubectl` to delete the cluster:
+In order to terminate the Flink session cluster, use `kubectl`:
 
     kubectl delete -f jobmanager-deployment.yaml
-    kubectl delete -f jobmanager-service.yaml
     kubectl delete -f taskmanager-deployment.yaml
+    kubectl delete -f jobmanager-service.yaml
+
+## Flink job cluster on Kubernetes
+
+A Flink job cluster is a dedicated cluster which runs a single job. 
+The job is part of the image and, thus, there is no extra job submission needed. 
+
+### Creating the job-specific image
+
+The Flink job cluster image needs to contain the user code jars of the job for which the cluster is started.
+Therefore, one needs to build a dedicated container image for every job.
+Please follow these [instructions](https://github.com/apache/flink/blob/{{ site.github_branch }}/flink-container/docker/README.md) to build the Docker image.
+    
+### Deploy Flink job cluster on Kubernetes
+
+In order to deploy a job cluster on Kubernetes, please follow these [instructions](https://github.com/apache/flink/blob/{{ site.github_branch }}/flink-container/kubernetes/README.md#deploy-flink-job-cluster).
 
 ## Advanced Cluster Deployment
 
-An early version of a [Flink Helm chart](https://github.com/docker-flink/
-examples) is available on GitHub.
+An early version of a [Flink Helm chart](https://github.com/docker-flink/examples) is available on GitHub.
 
 ## Appendix
 
-### Simple Kubernetes Flink cluster resources
+### Session cluster resource definitions
+
+The Deployment definitions use the pre-built image `flink:latest` which can be found [on Docker Hub](https://hub.docker.com/r/_/flink/).
+The image is built from this [Github repository](https://github.com/docker-flink/docker-flink).
 
 `jobmanager-deployment.yaml`
 {% highlight yaml %}
diff --git a/docs/ops/deployment/mesos.md b/docs/ops/deployment/mesos.md
index aca6f233eb1..1ff8afad74e 100644
--- a/docs/ops/deployment/mesos.md
+++ b/docs/ops/deployment/mesos.md
@@ -59,13 +59,11 @@ or configuration files. For instance, in non-containerized environments, the
 artifact server will provide the Flink binaries. What files will be served
 depends on the configuration overlay used.
 
-### Flink's JobManager and Web Interface
+### Flink's Dispatcher and Web Interface
 
-The Mesos scheduler currently resides with the JobManager but will be started
-independently of the JobManager in future versions (see
-[FLIP-6](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65147077)). The
-proposed changes will also add a Dispatcher component which will be the central
-point for job submission and monitoring.
+The Dispatcher and the web interface provide a central point for monitoring,
+job submission, and other client interaction with the cluster
+(see [FLIP-6](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65147077)).
 
 ### Startup script and configuration overlays
 
@@ -139,7 +137,7 @@ More information about the deployment scripts can be found [here](http://mesos.a
 
 ### Installing Marathon
 
-Optionally, you may also [install Marathon](https://mesosphere.github.io/marathon/docs/) which will be necessary to run Flink in high availability (HA) mode.
+Optionally, you may also [install Marathon](https://mesosphere.github.io/marathon/docs/) which enables you to run Flink in [high availability (HA) mode](#high-availability).
 
 ### Pre-installing Flink vs Docker/Mesos containers
 
@@ -171,8 +169,6 @@ which manage the Flink processes in a Mesos cluster:
    It is automatically launched by the Mesos worker node to bring up a new TaskManager.
 
 In order to run the `mesos-appmaster.sh` script you have to define `mesos.master` in the `flink-conf.yaml` or pass it via `-Dmesos.master=...` to the Java process.
-Additionally, you should define the number of task managers which are started by Mesos via `mesos.initial-tasks`.
-This value can also be defined in the `flink-conf.yaml` or passed as a Java property.
 
 When executing `mesos-appmaster.sh`, it will create a job manager on the machine where you executed the script.
 In contrast to that, the task managers will be run as Mesos tasks in the Mesos cluster.
@@ -188,19 +184,21 @@ For example:
         -Djobmanager.heap.mb=1024 \
         -Djobmanager.rpc.port=6123 \
         -Drest.port=8081 \
-        -Dmesos.initial-tasks=10 \
         -Dmesos.resourcemanager.tasks.mem=4096 \
         -Dtaskmanager.heap.mb=3500 \
         -Dtaskmanager.numberOfTaskSlots=2 \
         -Dparallelism.default=10
 
+<div class="alert alert-info">
+  <strong>Note:</strong> If Flink is in <a href="{{ site.baseurl }}/ops/config.html#legacy">legacy mode</a>,
+  you should additionally define the number of task managers that are started by Mesos via
+  <a href="{{ site.baseurl }}/ops/config.html#mesos-initial-tasks"><code>mesos.initial-tasks</code></a>.
+</div>
 
 ### High Availability
 
 You will need to run a service like Marathon or Apache Aurora which takes care of restarting the Flink master process in case of node or process failures.
-In addition, Zookeeper needs to be configured like described in the [High Availability section of the Flink docs]({{ site.baseurl }}/ops/jobmanager_high_availability.html)
-
-For the reconciliation of tasks to work correctly, please also set `high-availability.zookeeper.path.mesos-workers` to a valid Zookeeper path.
+In addition, Zookeeper needs to be configured like described in the [High Availability section of the Flink docs]({{ site.baseurl }}/ops/jobmanager_high_availability.html).
 
 #### Marathon
 
@@ -211,7 +209,7 @@ Here is an example configuration for Marathon:
 
     {
         "id": "flink",
-        "cmd": "$FLINK_HOME/bin/mesos-appmaster.sh -Djobmanager.heap.mb=1024 -Djobmanager.rpc.port=6123 -Drest.port=8081 -Dmesos.initial-tasks=1 -Dmesos.resourcemanager.tasks.mem=1024 -Dtaskmanager.heap.mb=1024 -Dtaskmanager.numberOfTaskSlots=2 -Dparallelism.default=2 -Dmesos.resourcemanager.tasks.cpus=1",
+        "cmd": "$FLINK_HOME/bin/mesos-appmaster.sh -Djobmanager.heap.mb=1024 -Djobmanager.rpc.port=6123 -Drest.port=8081 -Dmesos.resourcemanager.tasks.mem=1024 -Dtaskmanager.heap.mb=1024 -Dtaskmanager.numberOfTaskSlots=2 -Dparallelism.default=2 -Dmesos.resourcemanager.tasks.cpus=1",
         "cpus": 1.0,
         "mem": 1024
     }
@@ -220,60 +218,7 @@ When running Flink with Marathon, the whole Flink cluster including the job mana
 
 ### Configuration parameters
 
-`mesos.initial-tasks`: The initial workers to bring up when the master starts (**DEFAULT**: The number of workers specified at cluster startup).
-
-`mesos.constraints.hard.hostattribute`: Constraints for task placement on Mesos based on agent attributes (**DEFAULT**: None).
-Takes a comma-separated list of key:value pairs corresponding to the attributes exposed by the target
-mesos agents.  Example: `az:eu-west-1a,series:t2`
-
-`mesos.maximum-failed-tasks`: The maximum number of failed workers before the cluster fails (**DEFAULT**: Number of initial workers).
-May be set to -1 to disable this feature.
-
-`mesos.master`: The Mesos master URL. The value should be in one of the following forms:
-
-* `host:port`
-* `zk://host1:port1,host2:port2,.../path`
-* `zk://username:password@host1:port1,host2:port2,.../path`
-* `file:///path/to/file`
-
-`mesos.failover-timeout`: The failover timeout in seconds for the Mesos scheduler, after which running tasks are automatically shut down (**DEFAULT:** 600).
-
-`mesos.resourcemanager.artifactserver.port`:The config parameter defining the Mesos artifact server port to use. Setting the port to 0 will let the OS choose an available port.
-
-`mesos.resourcemanager.framework.name`: Mesos framework name (**DEFAULT:** Flink)
-
-`mesos.resourcemanager.framework.role`: Mesos framework role definition (**DEFAULT:** *)
-
-`high-availability.zookeeper.path.mesos-workers`: The ZooKeeper root path for persisting the Mesos worker information.
-
-`mesos.resourcemanager.framework.principal`: Mesos framework principal (**NO DEFAULT**)
-
-`mesos.resourcemanager.framework.secret`: Mesos framework secret (**NO DEFAULT**)
-
-`mesos.resourcemanager.framework.user`: Mesos framework user (**DEFAULT:**"")
-
-`mesos.resourcemanager.artifactserver.ssl.enabled`: Enables SSL for the Flink artifact server (**DEFAULT**: true). Note that `security.ssl.enabled` also needs to be set to `true` encryption to enable encryption.
-
-`mesos.resourcemanager.tasks.mem`: Memory to assign to the Mesos workers in MB (**DEFAULT**: 1024)
-
-`mesos.resourcemanager.tasks.cpus`: CPUs to assign to the Mesos workers (**DEFAULT**: 0.0)
-
-`mesos.resourcemanager.tasks.gpus`: GPUs to assign to the Mesos workers (**DEFAULT**: 0.0)
-
-`mesos.resourcemanager.tasks.container.type`: Type of the containerization used: "mesos" or "docker" (DEFAULT: mesos);
-
-`mesos.resourcemanager.tasks.container.image.name`: Image name to use for the container (**NO DEFAULT**)
-
-`mesos.resourcemanager.tasks.container.volumes`: A comma separated list of `[host_path:]`container_path`[:RO|RW]`. This allows for mounting additional volumes into your container. (**NO DEFAULT**)
-
-`mesos.resourcemanager.tasks.container.docker.parameters`: Custom parameters to be passed into docker run command when using the docker containerizer. Comma separated list of `key=value` pairs. `value` may contain '=' (**NO DEFAULT**)
-
-`mesos.resourcemanager.tasks.uris`: A comma separated list of URIs of custom artifacts to be downloaded into the sandbox of Mesos workers. (**NO DEFAULT**)
-
-`mesos.resourcemanager.tasks.container.docker.force-pull-image`: Instruct the docker containerizer to forcefully pull the image rather than reuse a cached version. (**DEFAULT**: false)
-
-`mesos.resourcemanager.tasks.hostname`: Optional value to define the TaskManager's hostname. The pattern `_TASK_` is replaced by the actual id of the Mesos task. This can be used to configure the TaskManager to use Mesos DNS (e.g. `_TASK_.flink-service.mesos`) for name lookups. (**NO DEFAULT**)
-
-`mesos.resourcemanager.tasks.bootstrap-cmd`: A command which is executed before the TaskManager is started (**NO DEFAULT**).
+For a list of Mesos specific configuration, refer to the [Mesos section]({{ site.baseurl }}/ops/config.html#mesos)
+of the configuration documentation.
 
 {% top %}
diff --git a/docs/ops/deployment/yarn_setup.md b/docs/ops/deployment/yarn_setup.md
index d2fdad9460c..18e693e1c7d 100644
--- a/docs/ops/deployment/yarn_setup.md
+++ b/docs/ops/deployment/yarn_setup.md
@@ -38,7 +38,7 @@ Start a YARN session with 4 Task Managers (each with 4 GB of Heapspace):
 curl -O <flink_hadoop2_download_url>
 tar xvzf flink-{{ site.version }}-bin-hadoop2.tgz
 cd flink-{{ site.version }}/
-./bin/yarn-session.sh -n 4 -jm 1024 -tm 4096
+./bin/yarn-session.sh -n 4 -jm 1024m -tm 4096m
 {% endhighlight %}
 
 Specify the `-s` flag for the number of processing slots per Task Manager. We recommend to set the number of slots to the number of processors per machine.
@@ -53,7 +53,7 @@ Once the session has been started, you can submit jobs to the cluster using the
 curl -O <flink_hadoop2_download_url>
 tar xvzf flink-{{ site.version }}-bin-hadoop2.tgz
 cd flink-{{ site.version }}/
-./bin/flink run -m yarn-cluster -yn 4 -yjm 1024 -ytm 4096 ./examples/batch/WordCount.jar
+./bin/flink run -m yarn-cluster -yn 4 -yjm 1024m -ytm 4096m ./examples/batch/WordCount.jar
 {% endhighlight %}
 
 ## Flink YARN Session
@@ -101,12 +101,12 @@ Usage:
    Optional
      -D <arg>                        Dynamic properties
      -d,--detached                   Start detached
-     -jm,--jobManagerMemory <arg>    Memory for JobManager Container [in MB]
+     -jm,--jobManagerMemory <arg>    Memory for JobManager Container with optional unit (default: MB)
      -nm,--name                      Set a custom name for the application on YARN
      -q,--query                      Display available YARN resources (memory, cores)
      -qu,--queue <arg>               Specify YARN queue.
      -s,--slots <arg>                Number of slots per TaskManager
-     -tm,--taskManagerMemory <arg>   Memory per TaskManager Container [in MB]
+     -tm,--taskManagerMemory <arg>   Memory per TaskManager Container with optional unit (default: MB)
      -z,--zookeeperNamespace <arg>   Namespace to create the Zookeeper sub-paths for HA mode
 {% endhighlight %}
 
@@ -131,8 +131,8 @@ Once Flink is deployed in your YARN cluster, it will show you the connection det
 Stop the YARN session by stopping the unix process (using CTRL+C) or by entering 'stop' into the client.
 
 Flink on YARN will only start all requested containers if enough resources are available on the cluster. Most YARN schedulers account for the requested memory of the containers,
-some account also for the number of vcores. By default, the number of vcores is equal to the processing slots (`-s`) argument. The `yarn.containers.vcores` allows overwriting the
-number of vcores with a custom value.
+some account also for the number of vcores. By default, the number of vcores is equal to the processing slots (`-s`) argument. The [`yarn.containers.vcores`]({{ site.baseurl }}/ops/config.html#yarn-containers-vcores) option allows overwriting the
+number of vcores with a custom value. In order for this parameter to work, you should enable CPU scheduling in your cluster.
 
 #### Detached YARN Session
 
diff --git a/docs/ops/security-ssl.md b/docs/ops/security-ssl.md
index 1a3c3810250..4e3716218d2 100644
--- a/docs/ops/security-ssl.md
+++ b/docs/ops/security-ssl.md
@@ -22,19 +22,120 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-This page provides instructions on how to enable SSL for the network communication between different Flink components.
+* ToC
+{:toc}
 
-## SSL Configuration
+This page provides instructions on how to enable TLS/SSL authentication and encryption for network communication with and between Flink processes.
 
-SSL can be enabled for all network communication between Flink components. SSL keystores and truststore has to be deployed on each Flink node and configured (conf/flink-conf.yaml) using keys in the security.ssl.* namespace (Please see the [configuration page](config.html) for details). SSL can be selectively enabled/disabled for different transports using the following flags. These flags are only applicable when security.ssl.enabled is set to true.
+## Internal and External Connectivity
 
-* **taskmanager.data.ssl.enabled**: SSL flag for data communication between task managers
-* **blob.service.ssl.enabled**: SSL flag for blob service client/server communication
-* **akka.ssl.enabled**: SSL flag for akka based control connection between the Flink client, jobmanager and taskmanager 
-* **jobmanager.web.ssl.enabled**: Flag to enable https access to the jobmanager's web frontend
+When securing network connections between processes through authentication and encryption, Apache Flink differentiates between *internal* and *external* connectivity.
+*Internal Connectivity* refers to all connections made between Flink processes. These connections run Flink custom protocols. Users never connect directly to internal connectivity endpoints.
+*External / REST Connectivity* refers to all connections made from the outside to Flink processes. This includes the web UI and REST commands to
+start and control running Flink jobs/applications, including the communication of the Flink CLI with the JobManager / Dispatcher.
 
-**IMPORTANT**
+For more flexibility, security for internal and external connectivity can be enabled and configured separately.
 
+<div style="text-align: center">
+  <img src="{{ site.baseurl }}/fig/ssl_internal_external.svg" alt="Internal and External Connectivity" style="width:75%; padding-top:10px; padding-bottom:10px;" />
+</div>
+
+### Internal Connectivity
+
+Internal connectivity includes:
+
+  - Control messages: RPC between JobManager / TaskManager / Dispatcher / ResourceManager
+  - The data plane: The connections between TaskManagers to exchange data during shuffles, broadcasts, redistribution, etc.
+  - The Blob Service (distribution of libraries and other artifacts).
+
+All internal connections are SSL authenticated and encrypted. The connections use **mutual authentication**, meaning both server
+and client side of each connection need to present the certificate to each other. The certificate acts effectively as a shared
+secret.
+
+A common setup is to generate a dedicated certificate (may be self-signed) for a Flink deployment. The certificate for internal communication
+is not needed by any other party to interact with Flink, and can be simply added to the container images, or attached to the YARN deployment.
+
+*Note: Because internal connections are mutually authenticated with shared certificates, Flink can skip hostname verification. This makes container-based setups easier.*
+
+### External / REST Connectivity
+
+All external connectivity is exposed via an HTTP/REST endpoint, used for example by the web UI and the CLI:
+
+  - Communication with the *Dispatcher* to submit jobs (session clusters)
+  - Communication with the *JobManager* to inspect and modify a running job/application
+
+The REST endpoints can be configured to require SSL connections. The server will, however, accept connections from any client by default, meaning the REST endpoint does not authenticate the client.
+
+Simple mutual authentication may be enabled by configuration if authentication of connections to the REST endpoint is required, but we recommend deploying a "sidecar proxy":
+Bind the REST endpoint to the loopback interface (or the pod-local interface in Kubernetes) and start a REST proxy that authenticates and forwards the requests to Flink.
+Examples for proxies that Flink users have deployed are [Envoy Proxy](https://www.envoyproxy.io/) or
+[NGINX with MOD_AUTH](http://nginx.org/en/docs/http/ngx_http_auth_request_module.html).
+
+The rationale behind delegating authentication to a proxy is that such proxies offer a wide variety of authentication options and thus better integration into existing infrastructures.
+
+
+### Queryable State
+
+Connections to the queryable state endpoints are currently not authenticated or encrypted.
+
+
+## Configuring SSL
+
+SSL can be enabled separately for *internal* and *external* connectivity:
+
+  - **security.ssl.internal.enabled**: Enable SSL for all *internal* connections.
+  - **security.ssl.rest.enabled**: Enable SSL for *REST / external* connections.
+
+*Note: For backwards compatibility, the **security.ssl.enabled** option still exists and enables SSL for both internal and REST endpoints.*
+
+For internal connectivity, you can optionally disable security for different connection types separately.
+When `security.ssl.internal.enabled` is set to `true`, you can set the following parameters to `false` to disable SSL for that particular connection type:
+
+  - `taskmanager.data.ssl.enabled`: Data communication between TaskManagers
+  - `blob.service.ssl.enabled`: Transport of BLOBs from JobManager to TaskManager
+  - `akka.ssl.enabled`: Akka-based RPC connections between JobManager / TaskManager / ResourceManager
+
+### Keystores and Truststores
+
+The SSL configuration requires configuring a **keystore** and a **truststore**. The *keystore* contains the public certificate
+(public key) and the private key, while the truststore contains the trusted certificates or the trusted authorities. Both stores
+need to be set up such that the truststore trusts the keystore's certificate.
+
+#### Internal Connectivity
+
+Because internal communication is mutually authenticated, keystore and truststore typically contain the same dedicated certificate.
+The certificate can use wildcard hostnames or addresses, because the certificate is expected to be a shared secret and hostnames
+are not verified. It is even possible to use the same file (the keystore) also as the truststore.
+
+{% highlight yaml %}
+security.ssl.internal.keystore: /path/to/file.keystore
+security.ssl.internal.keystore-password: keystore_password
+security.ssl.internal.key-password: key_password
+security.ssl.internal.truststore: /path/to/file.truststore
+security.ssl.internal.truststore-password: truststore_password
+{% endhighlight %}
+
+#### REST Endpoints (external connectivity)
+
+For REST endpoints, by default the keystore is used by the server endpoint, and the truststore is used by the REST clients (including the CLI client)
+to accept the server's certificate. In the case where the REST keystore has a self-signed certificate, the truststore must trust that certificate directly.
+If the REST endpoint uses a certificate that is signed through a proper certification hierarchy, the roots of that hierarchy should
+be in the trust store.
+
+If mutual authentication is enabled, the keystore and the truststore are used by both the server endpoint and the REST clients, as with internal connectivity.
+
+{% highlight yaml %}
+security.ssl.rest.keystore: /path/to/file.keystore
+security.ssl.rest.keystore-password: keystore_password
+security.ssl.rest.key-password: key_password
+security.ssl.rest.truststore: /path/to/file.truststore
+security.ssl.rest.truststore-password: truststore_password
+security.ssl.rest.authentication-enabled: false
+{% endhighlight %}
+
+### Cipher suites
+
+<span class="label label-danger">IMPORTANT</span>
 The [IETF RFC 7525](https://tools.ietf.org/html/rfc7525) recommends to use a specific set of cipher suites for strong security.
 Because these cipher suites were not available on many setups out of the box, Flink's default value is set to a slightly
 weaker but more compatible cipher suite.
@@ -44,115 +145,129 @@ We recommend that SSL setups update to the stronger cipher suites, if possible,
 security.ssl.algorithms: TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
 {% endhighlight %}
 
-If these suites are not supported on your setup, you will see that Flink processes will not be able to connect to each other.
+If these cipher suites are not supported on your setup, you will see that Flink processes will not be able to connect to each other.
 
-## Deploying Keystores and Truststores
+### Complete List of SSL Options
 
-You need to have a Java Keystore generated and copied to each node in the Flink cluster. The common name or subject alternative names in the certificate should match the node's hostname and IP address. Keystores and truststores can be generated using the [keytool utility](https://docs.oracle.com/javase/8/docs/technotes/tools/unix/keytool.html). All Flink components should have read access to the keystore and truststore files.
+{% include generated/security_configuration.html %}
 
-### Example: Creating self signed CA and keystores for a two-node cluster
+## Creating and Deploying Keystores and Truststores
 
-Execute the following keytool commands to create a truststore with a self signed CA.
+Keys, certificates, and the keystores and truststores can be generated using the [keytool utility](https://docs.oracle.com/javase/8/docs/technotes/tools/unix/keytool.html).
+You need to have an appropriate Java Keystore and Truststore accessible from each node in the Flink cluster.
 
-{% highlight bash %}
-keytool -genkeypair -alias ca -keystore ca.keystore -dname "CN=Sample CA" -storepass password -keypass password -keyalg RSA -ext bc=ca:true
-keytool -keystore ca.keystore -storepass password -alias ca -exportcert > ca.cer
-keytool -importcert -keystore ca.truststore -alias ca -storepass password -noprompt -file ca.cer
-{% endhighlight %}
+  - For standalone setups, this means copying the files to each node, or adding them to a shared mounted directory.
+  - For container based setups, add the keystore and truststore files to the container images.
+  - For Yarn/Mesos setups, the cluster deployment phase can automatically distribute the keystore and truststore files.
+
+For the externally facing REST endpoint, the common name or subject alternative names in the certificate should match the node's hostname and IP address.
+
+
+## Example SSL Setup Standalone and Kubernetes
+
+**Internal Connectivity**
 
-Now create keystores for each node with certificates signed by the above CA. Let node1.company.org and node2.company.org be the hostnames with IPs 192.168.1.1 and 192.168.1.2 respectively
+Execute the following keytool commands to create a key pair in a keystore:
 
-#### Node 1
 {% highlight bash %}
-keytool -genkeypair -alias node1 -keystore node1.keystore -dname "CN=node1.company.org" -ext SAN=dns:node1.company.org,ip:192.168.1.1 -storepass password -keypass password -keyalg RSA
-keytool -certreq -keystore node1.keystore -storepass password -alias node1 -file node1.csr
-keytool -gencert -keystore ca.keystore -storepass password -alias ca -ext SAN=dns:node1.company.org,ip:192.168.1.1 -infile node1.csr -outfile node1.cer
-keytool -importcert -keystore node1.keystore -storepass password -file ca.cer -alias ca -noprompt
-keytool -importcert -keystore node1.keystore -storepass password -file node1.cer -alias node1 -noprompt
+keytool -genkeypair -alias flink.internal -keystore internal.keystore -dname "CN=flink.internal" -storepass internal_store_password -keypass internal_key_password -keyalg RSA -keysize 4096
 {% endhighlight %}
 
-#### Node 2
-{% highlight bash %}
-keytool -genkeypair -alias node2 -keystore node2.keystore -dname "CN=node2.company.org" -ext SAN=dns:node2.company.org,ip:192.168.1.2 -storepass password -keypass password -keyalg RSA
-keytool -certreq -keystore node2.keystore -storepass password -alias node2 -file node2.csr
-keytool -gencert -keystore ca.keystore -storepass password -alias ca -ext SAN=dns:node2.company.org,ip:192.168.1.2 -infile node2.csr -outfile node2.cer
-keytool -importcert -keystore node2.keystore -storepass password -file ca.cer -alias ca -noprompt
-keytool -importcert -keystore node2.keystore -storepass password -file node2.cer -alias node2 -noprompt
+The single key/certificate in the keystore is used the same way by the server and client endpoints (mutual authentication).
+The key pair acts as the shared secret for internal security, and we can directly use it as keystore and truststore.
+
+{% highlight yaml %}
+security.ssl.internal.enabled: true
+security.ssl.internal.keystore: /path/to/flink/conf/internal.keystore
+security.ssl.internal.truststore: /path/to/flink/conf/internal.keystore
+security.ssl.internal.keystore-password: internal_store_password
+security.ssl.internal.truststore-password: internal_store_password
+security.ssl.internal.key-password: internal_key_password
 {% endhighlight %}
 
-## Standalone Deployment
-Configure each node in the standalone cluster to pick up the keystore and truststore files present in the local file system.
+**REST Endpoint**
 
-### Example: Two-node cluster
+The REST endpoint may receive connections from external processes, including tools that are not part of Flink (for example, a curl request to the REST API).
+Setting up a proper certificate that is signed through a CA hierarchy may make sense for the REST endpoint.
 
-* Generate two keystores, one for each node, and copy them to the filesystem on the respective node. Also copy the public key of the CA (which was used to sign the certificates in the keystore) as a Java truststore on both the nodes.
-* Configure conf/flink-conf.yaml to pick up these files.
+However, as mentioned above, the REST endpoint does not authenticate clients and thus typically needs to be secured via a proxy anyway.
 
-#### Node 1
-{% highlight yaml %}
-security.ssl.enabled: true
-security.ssl.keystore: /usr/local/node1.keystore
-security.ssl.keystore-password: password
-security.ssl.key-password: password
-security.ssl.truststore: /usr/local/ca.truststore
-security.ssl.truststore-password: password
+**REST Endpoint (simple self signed certificate)**
+
+This example shows how to create a simple keystore / truststore pair. The truststore does not contain the private key and can
+be shared with other applications. In this example, *myhost.company.org / ip:10.0.2.15* is the node (or service) for the Flink master.
+
+{% highlight bash %}
+keytool -genkeypair -alias flink.rest -keystore rest.keystore -dname "CN=myhost.company.org" -ext "SAN=dns:myhost.company.org,ip:10.0.2.15" -storepass rest_keystore_password -keypass rest_key_password -keyalg RSA -keysize 4096
+
+keytool -exportcert -keystore rest.keystore -alias flink.rest -storepass rest_keystore_password -file flink.cer
+
+keytool -importcert -keystore rest.truststore -alias flink.rest -storepass rest_truststore_password -file flink.cer -noprompt
 {% endhighlight %}
 
-#### Node 2
 {% highlight yaml %}
-security.ssl.enabled: true
-security.ssl.keystore: /usr/local/node2.keystore
-security.ssl.keystore-password: password
-security.ssl.key-password: password
-security.ssl.truststore: /usr/local/ca.truststore
-security.ssl.truststore-password: password
+security.ssl.rest.enabled: true
+security.ssl.rest.keystore: /path/to/flink/conf/rest.keystore
+security.ssl.rest.truststore: /path/to/flink/conf/rest.truststore
+security.ssl.rest.keystore-password: rest_keystore_password
+security.ssl.rest.truststore-password: rest_truststore_password
+security.ssl.rest.key-password: rest_key_password
 {% endhighlight %}
 
-* Restart the Flink components to enable SSL for all of Flink's internal communication
-* Verify by accessing the jobmanager's UI using https url. The taskmanager's path in the UI should show akka.ssl.tcp:// as the protocol
-* The blob server and taskmanager's data communication can be verified from the log files
+**REST Endpoint (with a self signed CA)**
 
-## YARN Deployment
-The keystores and truststore can be deployed in a YARN setup in multiple ways depending on the cluster setup. Following are two ways to achieve this.
+Execute the following keytool commands to create a truststore with a self signed CA.
 
-### 1. Deploy keystores before starting the YARN session
-The keystores and truststore should be generated and deployed on all nodes in the YARN setup where Flink components can potentially be executed. The same Flink config file from the Flink YARN client is used for all the Flink components running in the YARN cluster. Therefore we need to ensure the keystore is deployed and accessible using the same filepath in all the YARN nodes.
+{% highlight bash %}
+keytool -genkeypair -alias ca -keystore ca.keystore -dname "CN=Sample CA" -storepass ca_keystore_password -keypass ca_key_password -keyalg RSA -keysize 4096 -ext "bc=ca:true"
 
-#### Example config
-{% highlight yaml %}
-security.ssl.enabled: true
-security.ssl.keystore: /usr/local/node.keystore
-security.ssl.keystore-password: password
-security.ssl.key-password: password
-security.ssl.truststore: /usr/local/ca.truststore
-security.ssl.truststore-password: password
+keytool -exportcert -keystore ca.keystore -alias ca -storepass ca_keystore_password -file ca.cer
+
+keytool -importcert -keystore ca.truststore -alias ca -storepass ca_truststore_password -file ca.cer -noprompt
 {% endhighlight %}
 
-Now you can start the YARN session from the CLI like you would normally do.
+Now create a keystore for the REST endpoint with a certificate signed by the above CA.
+Let *flink.company.org / ip:10.0.2.15* be the hostname of the Flink master (JobManager).
 
-### 2. Use YARN CLI to deploy the keystores and truststore
-We can use the YARN client's ship files option (-yt) to distribute the keystores and truststore. Since the same keystore will be deployed at all nodes, we need to ensure a single certificate in the keystore can be served for all nodes. This can be done by either using the Subject Alternative Name (SAN) extension in the certificate and setting it to cover all nodes (hostname and ip addresses) in the cluster or by using wildcard subdomain names (if the cluster is setup accordingly). 
+{% highlight bash %}
+keytool -genkeypair -alias flink.rest -keystore rest.signed.keystore -dname "CN=flink.company.org" -ext "SAN=dns:flink.company.org" -storepass rest_keystore_password -keypass rest_key_password -keyalg RSA -keysize 4096
 
-#### Example
-* Supply the following parameters to the keytool command when generating the keystore: -ext SAN=dns:node1.company.org,ip:192.168.1.1,dns:node2.company.org,ip:192.168.1.2
-* Copy the keystore and the CA's truststore into a local directory (at the CLI's working directory), say deploy-keys/
-* Update the configuration to pick up the files from a relative path
+keytool -certreq -alias flink.rest -keystore rest.signed.keystore -storepass rest_keystore_password -keypass rest_key_password -file rest.csr
 
-{% highlight yaml %}
-security.ssl.enabled: true
-security.ssl.keystore: deploy-keys/node.keystore
-security.ssl.keystore-password: password
-security.ssl.key-password: password
-security.ssl.truststore: deploy-keys/ca.truststore
-security.ssl.truststore-password: password
+keytool -gencert -alias ca -keystore ca.keystore -storepass ca_keystore_password -keypass ca_key_password -ext "SAN=dns:flink.company.org,ip:10.0.2.15" -infile rest.csr -outfile rest.cer
+
+keytool -importcert -keystore rest.signed.keystore -storepass rest_keystore_password -file ca.cer -alias ca -noprompt
+
+keytool -importcert -keystore rest.signed.keystore -storepass rest_keystore_password -keypass rest_key_password -file rest.cer -alias flink.rest -noprompt
 {% endhighlight %}
 
-* Start the YARN session using the -yt parameter
+Now add the following configuration to your `flink-conf.yaml`:
 
-{% highlight bash %}
-flink run -m yarn-cluster -yt deploy-keys/ TestJob.jar
+{% highlight yaml %}
+security.ssl.rest.enabled: true
+security.ssl.rest.keystore: /path/to/flink/conf/rest.signed.keystore
+security.ssl.rest.truststore: /path/to/flink/conf/ca.truststore
+security.ssl.rest.keystore-password: rest_keystore_password
+security.ssl.rest.key-password: rest_key_password
+security.ssl.rest.truststore-password: ca_truststore_password
 {% endhighlight %}
 
-When deployed using YARN, Flink's web dashboard is accessible through YARN proxy's Tracking URL. To ensure that the YARN proxy is able to access Flink's https url you need to configure YARN proxy to accept Flink's SSL certificates. Add the custom CA certificate into Java's default truststore on the YARN Proxy node.
+
+## Tips for YARN / Mesos Deployment
+
+For YARN and Mesos, you can use the tools of Yarn and Mesos to help:
+
+  - Configuring security for internal communication is exactly the same as in the example above.
+
+  - To secure the REST endpoint, you need to issue the REST endpoint's certificate such that it is valid for all hosts
+    that the Flink master may get deployed to. This can be done with a wildcard DNS name, or by adding multiple DNS names.
+
+  - The easiest way to deploy keystores and truststore is by YARN client's *ship files* option (`-yt`).
+    Copy the keystore and truststore files into a local directory (say `deploy-keys/`) and start the YARN session as
+    follows: `flink run -m yarn-cluster -yt deploy-keys/ flinkapp.jar`
+
+  - When deployed using YARN, Flink's web dashboard is accessible through YARN proxy's Tracking URL.
+    To ensure that the YARN proxy is able to access Flink's HTTPS URL, you need to configure YARN proxy to accept Flink's SSL certificates.
+    For that, add the custom CA certificate into Java's default truststore on the YARN Proxy node.
 
 {% top %}
diff --git a/docs/ops/state/large_state_tuning.md b/docs/ops/state/large_state_tuning.md
index 6df551f32da..62b3ee557f6 100644
--- a/docs/ops/state/large_state_tuning.md
+++ b/docs/ops/state/large_state_tuning.md
@@ -142,6 +142,17 @@ by default. To enable this feature, users can instantiate a `RocksDBStateBackend
         new RocksDBStateBackend(filebackend, true);
 {% endhighlight %}
 
+**RocksDB Timers**
+
+For RocksDB, a user can choose whether timers are stored on the heap (default) or inside RocksDB. Heap-based timers can have better performance for smaller numbers of
+timers, while storing timers inside RocksDB offers higher scalability, as the number of timers in RocksDB can exceed the available main memory (spilling to disk).
+
+When using RocksDB as the state backend, the type of timer storage can be selected through Flink's configuration via the option key `state.backend.rocksdb.timer-service.factory`.
+Possible choices are `heap` (to store timers on the heap, default) and `rocksdb` (to store timers in RocksDB).
+
+<span class="label label-info">Note</span> *The combination of the RocksDB state backend, incremental checkpoints, and heap-based timers currently does NOT support asynchronous snapshots for the timer state.
+Other state, like keyed state, is still snapshotted asynchronously. Please note that this is not a regression from previous versions and will be resolved with `FLINK-10026`.*
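
A minimal sketch, assuming the option key described above is picked up from the cluster configuration when the RocksDB backend is used (for a production setup the key would normally be placed in `flink-conf.yaml` instead):

{% highlight java %}
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Pass the timer storage choice through the configuration used to start a local cluster.
Configuration conf = new Configuration();
conf.setString("state.backend.rocksdb.timer-service.factory", "rocksdb");

StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(1, conf);
{% endhighlight %}
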
+
 **Passing Options to RocksDB**
 
 {% highlight java %}
@@ -177,11 +188,10 @@ Flink provides some predefined collections of option for RocksDB for different s
 We expect to accumulate more such profiles over time. Feel free to contribute such predefined option profiles when you
 found a set of options that work well and seem representative for certain workloads.
 
-**Important:** RocksDB is a native library, whose allocated memory not from the JVM, but directly from the process'
-native memory. Any memory you assign to RocksDB will have to be accounted for, typically by decreasing the JVM heap size
+<span class="label label-info">Note</span> RocksDB is a native library that allocates memory directly from the process,
+and not from the JVM. Any memory you assign to RocksDB will have to be accounted for, typically by decreasing the JVM heap size
 of the TaskManagers by the same amount. Not doing that may result in YARN/Mesos/etc terminating the JVM processes for
-allocating more memory than configures.
-
+allocating more memory than configured.
 
 ## Capacity Planning
 
@@ -231,7 +241,7 @@ Compression can be activated through the `ExecutionConfig`:
 		executionConfig.setUseSnapshotCompression(true);
 {% endhighlight %}
 
-**Notice:** The compression option has no impact on incremental snapshots, because they are using RocksDB's internal
+<span class="label label-info">Note</span> The compression option has no impact on incremental snapshots, because they are using RocksDB's internal
 format which is always using snappy compression out of the box.
 
 ## Task-Local Recovery
diff --git a/docs/ops/state/savepoints.md b/docs/ops/state/savepoints.md
index 6dd5154c5e6..be653f5b109 100644
--- a/docs/ops/state/savepoints.md
+++ b/docs/ops/state/savepoints.md
@@ -25,17 +25,29 @@ under the License.
 * toc
 {:toc}
 
-## Overview
+## What is a Savepoint? How is a Savepoint different from a Checkpoint?
 
-Savepoints are externally stored self-contained checkpoints that you can use to stop-and-resume or update your Flink programs. They use Flink's [checkpointing mechanism]({{ site.baseurl }}/internals/stream_checkpointing.html) to create a (non-incremental) snapshot of the state of your streaming program and write the checkpoint data and meta data out to an external file system.
-
-This page covers all steps involved in triggering, restoring, and disposing savepoints.
-For more details on how Flink handles state and failures in general, check out the [State in Streaming Programs]({{ site.baseurl }}/dev/stream/state/index.html) page.
+A Savepoint is a consistent image of the execution state of a streaming job, created via Flink's [checkpointing mechanism]({{ site.baseurl }}/internals/stream_checkpointing.html). You can use Savepoints to stop-and-resume, fork,
+or update your Flink jobs. Savepoints consist of two parts: a directory with (typically large) binary files on stable storage (e.g. HDFS, S3, ...) and a (relatively small) meta data file. The files on stable storage represent the net data of the job's execution state
+image. The meta data file of a Savepoint contains (primarily) pointers to all files on stable storage that are part of the Savepoint, in the form of absolute paths.
 
 <div class="alert alert-warning">
 <strong>Attention:</strong> In order to allow upgrades between programs and Flink versions, it is important to check out the following section about <a href="#assigning-operator-ids">assigning IDs to your operators</a>.
 </div>
 
+Conceptually, Flink's Savepoints are different from Checkpoints in a similar way that backups are different from recovery logs in traditional database systems. The primary purpose of Checkpoints is to provide a recovery mechanism in case of
+unexpected job failures. A Checkpoint's lifecycle is managed by Flink, i.e. a Checkpoint is created, owned, and released by Flink without user interaction. As a method of recovery that is periodically triggered, the two main
+design goals for the Checkpoint implementation are i) being as lightweight to create as possible and ii) being as fast to restore from as possible. Optimizations towards those goals can exploit certain properties, e.g. that the job code
+doesn't change between execution attempts. Checkpoints are usually dropped after the job is terminated by the user (except if explicitly configured as retained Checkpoints).
+
+In contrast to all this, Savepoints are created, owned, and deleted by the user. Their use-case is for planned, manual backup and resume. For example, this could be an update of your Flink version, changing your job graph,
+changing parallelism, forking a second job, e.g. for a red/blue deployment, and so on. Of course, Savepoints must survive job termination. Conceptually, Savepoints can be a bit more expensive to produce and restore from, and focus
+more on portability and support for the previously mentioned changes to the job.
+
+Those conceptual differences aside, the current implementations of Checkpoints and Savepoints are basically using the same code and produce the same "format". However, there is currently one exception from this, and we might
+introduce more differences in the future. The exception is incremental checkpoints with the RocksDB state backend. They use an internal RocksDB format instead of Flink's native savepoint format. This makes them the
+first instance of a more lightweight checkpointing mechanism, compared to Savepoints.
+
 ## Assigning Operator IDs
 
 It is **highly recommended** that you adjust your programs as described in this section in order to be able to upgrade your programs in the future. The main required change is to manually specify operator IDs via the **`uid(String)`** method. These IDs are used to scope the state of each operator.
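
As a hedged illustration of the `uid(String)` method (the source and mapper classes below are hypothetical placeholders, not part of any Flink API):

{% highlight java %}
DataStream<String> mapped = env
    .addSource(new MyStatefulSource())   // hypothetical stateful source
    .uid("my-source")                    // stable ID scoping the source's state
    .map(new MyStatefulMapper())         // hypothetical stateful map function
    .uid("my-mapper");                   // stable ID scoping the mapper's state
{% endhighlight %}
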
@@ -106,6 +118,11 @@ Please follow <a href="https://issues.apache.org/jira/browse/FLINK-5778">FLINK-5
 
 Note that if you use the `MemoryStateBackend`, metadata *and* savepoint state will be stored in the `_metadata` file. Since it is self-contained, you may move the file and restore from any location.
 
+<div class="alert alert-warning">
+  <strong>Attention:</strong> It is discouraged to move or delete the last savepoint of a running job, because this might interfere with failure recovery. Savepoints have side effects on exactly-once sinks:
+  to ensure exactly-once semantics, if there is no checkpoint after the last savepoint, the savepoint will be used for recovery.
+</div>
+
 #### Trigger a Savepoint
 
 {% highlight shell %}
@@ -211,4 +228,10 @@ If the savepoint was triggered with Flink >= 1.2.0 and using no deprecated state
 
 If you are resuming from a savepoint triggered with Flink < 1.2.0 or using now deprecated APIs you first have to migrate your job and savepoint to Flink >= 1.2.0 before being able to change the parallelism. See the [upgrading jobs and Flink versions guide]({{ site.baseurl }}/ops/upgrading.html).
 
+### Can I move the Savepoint files on stable storage?
+
+The quick answer to this question is currently "no", because the meta data file references the files on stable storage as absolute paths for technical reasons. The longer answer is: if you MUST move the files for some reason, there are two
+potential approaches as a workaround. First, simpler but potentially more dangerous, you can use an editor to find the old paths in the meta data file and replace them with the new paths. Second, you can use the class
+SavepointV2Serializer as a starting point to programmatically read, manipulate, and rewrite the meta data file with the new paths.
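
For illustration, a rough way to see which absolute paths a (binary) meta data file currently references is to scan it for printable fragments; the local path below is hypothetical, and this is read-only inspection, not a safe way to rewrite the file.

{% highlight shell %}
# Hypothetical local copy of a savepoint's meta data file; prints the absolute paths it references
strings /tmp/savepoint-abc123/_metadata | grep "hdfs://"
{% endhighlight %}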
+
 {% top %}
diff --git a/docs/ops/state/state_backends.md b/docs/ops/state/state_backends.md
index 3d4ce586759..4e09b345e10 100644
--- a/docs/ops/state/state_backends.md
+++ b/docs/ops/state/state_backends.md
@@ -116,7 +116,8 @@ The RocksDBStateBackend is encouraged for:
 Note that the amount of state that you can keep is only limited by the amount of disk space available.
 This allows keeping very large state, compared to the FsStateBackend that keeps state in memory.
 This also means, however, that the maximum throughput that can be achieved will be lower with
-this state backend.
+this state backend. All reads/writes from/to this backend have to go through de-/serialization to retrieve/store the state objects, which is also more expensive than always working with the
+on-heap representation, as the heap-based backends do.
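
As a rough sketch of what that per-access cost means in practice: with the RocksDBStateBackend every `value()` call deserializes the stored bytes and every `update()` serializes them again, while heap-based backends hand out the on-heap objects directly. The class and state names below are made up for illustration, and the function has to run on a keyed stream.

{% highlight java %}
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class CountPerKey extends RichFlatMapFunction<String, Long> {

	private transient ValueState<Long> count;

	@Override
	public void open(Configuration parameters) {
		count = getRuntimeContext().getState(new ValueStateDescriptor<>("count", Long.class));
	}

	@Override
	public void flatMap(String value, Collector<Long> out) throws Exception {
		// with RocksDB: value() deserializes and update() serializes on every call
		Long current = count.value();
		long updated = (current == null) ? 1L : current + 1;
		count.update(updated);
		out.collect(updated);
	}
}
{% endhighlight %}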
 
 RocksDBStateBackend is currently the only backend that offers incremental checkpoints (see [here](large_state_tuning.html)). 
 
diff --git a/docs/ops/upgrading.md b/docs/ops/upgrading.md
index 475dd402719..eb00a2b28c6 100644
--- a/docs/ops/upgrading.md
+++ b/docs/ops/upgrading.md
@@ -209,6 +209,9 @@ Savepoints are compatible across Flink versions as indicated by the table below:
       <th class="text-center">1.1.x</th>
       <th class="text-center">1.2.x</th>
       <th class="text-center">1.3.x</th>
+      <th class="text-center">1.4.x</th>
+      <th class="text-center">1.5.x</th>
+      <th class="text-center">1.6.x</th>
       <th class="text-center">Limitations</th>
     </tr>
   </thead>
@@ -218,7 +221,10 @@ Savepoints are compatible across Flink versions as indicated by the table below:
           <td class="text-center">O</td>
           <td class="text-center">O</td>
           <td class="text-center">O</td>
-          <td class="text-left">The maximum parallelism of a job that was migrated from Flink 1.1.x to 1.2.x is
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-left">The maximum parallelism of a job that was migrated from Flink 1.1.x to 1.2.x+ is
           currently fixed as the parallelism of the job. This means that the parallelism can not be increased after
           migration. This limitation might be removed in a future bugfix release.</td>
     </tr>
@@ -227,15 +233,51 @@ Savepoints are compatible across Flink versions as indicated by the table below:
           <td class="text-center"></td>
           <td class="text-center">O</td>
           <td class="text-center">O</td>
-          <td class="text-left">When migrating from Flink 1.2.x to Flink 1.3.x, changing parallelism at the same
-          time is not supported. Users have to first take a savepoint after migrating to Flink 1.3.x, and then change
-          parallelism.</td>
+          <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-left">When migrating from Flink 1.2.x to Flink 1.3.x+, changing parallelism at the same
+          time is not supported. Users have to first take a savepoint after migrating to Flink 1.3.x+, and then change
+          parallelism. Savepoints created for CEP applications cannot be restored in 1.4.x+.</td>
     </tr>
     <tr>
           <td class="text-center"><strong>1.3.x</strong></td>
           <td class="text-center"></td>
           <td class="text-center"></td>
           <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-left">Migrating from Flink 1.3.0 to Flink 1.4.[0,1] will fail if the savepoint contains Scala case classes. Users have to directly migrate to 1.4.2+ instead.</td>
+    </tr>
+    <tr>
+          <td class="text-center"><strong>1.4.x</strong></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-left"></td>
+    </tr>
+    <tr>
+          <td class="text-center"><strong>1.5.x</strong></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center">O</td>
+          <td class="text-center">O</td>
+          <td class="text-left"></td>
+    </tr>
+    <tr>
+          <td class="text-center"><strong>1.6.x</strong></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center"></td>
+          <td class="text-center">O</td>
           <td class="text-left"></td>
     </tr>
   </tbody>
diff --git a/docs/release-notes/flink-1.5.md b/docs/release-notes/flink-1.5.md
index 4cee5774cdf..8a5fb69861c 100644
--- a/docs/release-notes/flink-1.5.md
+++ b/docs/release-notes/flink-1.5.md
@@ -22,6 +22,15 @@ under the License.
 
 These release notes discuss important aspects, such as configuration, behavior, or dependencies, that changed between Flink 1.4 and Flink 1.5. Please read these notes carefully if you are planning to upgrade your Flink version to 1.5.
 
+### Changed syntax of jobmanager.sh script
+
+The `jobmanager.sh` script was reworked, removing the execution mode parameter (`local` vs. `cluster`).
+Now it has the following syntax: `jobmanager.sh ((start|start-foreground) [host] [webui-port])|stop|stop-all`.
+
+Consequently, all external scripts need to remove the execution mode parameter when calling `jobmanager.sh`.
+Otherwise, the execution mode (`local` or `cluster`) will be interpreted as the host name of the started process.
+This can lead to connectivity problems between Flink's components.
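
For illustration, and assuming the pre-1.5 script took the execution mode as its second positional argument, the change looks roughly like this (host name and port are placeholders):

{% highlight shell %}
# Flink < 1.5: execution mode ("cluster" or "local") was passed explicitly
./bin/jobmanager.sh start cluster jobmanager-host 8081

# Flink 1.5: drop the execution mode argument,
# otherwise "cluster" would be interpreted as the host name
./bin/jobmanager.sh start jobmanager-host 8081
{% endhighlight %}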
+
 ### Update Configuration for Reworked Job Deployment
 
 Flink’s reworked cluster and job deployment component improves the integration with resource managers and enables dynamic resource allocation. One result of these changes is, that you no longer have to specify the number of containers when submitting applications to YARN and Mesos. Flink will automatically determine the number of containers from the parallelism of the application.
diff --git a/docs/release-notes/flink-1.6.md b/docs/release-notes/flink-1.6.md
index 31cd3ac9e56..34cd6135511 100644
--- a/docs/release-notes/flink-1.6.md
+++ b/docs/release-notes/flink-1.6.md
@@ -22,5 +22,13 @@ under the License.
 
 These release notes discuss important aspects, such as configuration, behavior, or dependencies, that changed between Flink 1.5 and Flink 1.6. Please read these notes carefully if you are planning to upgrade your Flink version to 1.6.
 
+### Changed Configuration Default Values
+
+The default value of the slot idle timeout `slot.idle.timeout` is set to the default value of the heartbeat timeout (`50 s`). 
+
+### Changed ElasticSearch 5.x Sink API
+
+Previous APIs in the Flink ElasticSearch 5.x Sink's `RequestIndexer` interface have been deprecated in favor of new signatures. 
+When adding requests to the `RequestIndexer`, the requests now must be of type `IndexRequest`, `DeleteRequest`, or `UpdateRequest`, instead of the base `ActionRequest`. 
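
For illustration, an `ElasticsearchSinkFunction` written against the new signatures might hand a typed request to the `RequestIndexer` roughly as follows; the index and type names are placeholders.

{% highlight java %}
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.util.Collections;

public class ExampleSinkFunction implements ElasticsearchSinkFunction<String> {

	@Override
	public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
		IndexRequest request = Requests.indexRequest()
			.index("example-index")   // placeholder index name
			.type("example-type")     // placeholder type name
			.source(Collections.singletonMap("data", element));

		// new API: add a typed IndexRequest instead of a raw ActionRequest
		indexer.add(request);
	}
}
{% endhighlight %}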
 
 {% top %}
diff --git a/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java b/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
index e2a260c5478..ae0052c3c38 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
@@ -58,6 +58,7 @@
 import org.apache.flink.util.ExceptionUtils;
 import org.apache.flink.util.FlinkException;
 import org.apache.flink.util.Preconditions;
+import org.apache.flink.util.ShutdownHookUtil;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Options;
@@ -250,13 +251,22 @@ protected void run(String[] args) throws Exception {
 					LOG.info("Could not properly shut down the client.", e);
 				}
 			} else {
+				final Thread shutdownHook;
 				if (clusterId != null) {
 					client = clusterDescriptor.retrieve(clusterId);
+					shutdownHook = null;
 				} else {
 					// also in job mode we have to deploy a session cluster because the job
 					// might consist of multiple parts (e.g. when using collect)
 					final ClusterSpecification clusterSpecification = customCommandLine.getClusterSpecification(commandLine);
 					client = clusterDescriptor.deploySessionCluster(clusterSpecification);
+					// if not running in detached mode, add a shutdown hook to shut down cluster if client exits
+					// there's a race-condition here if cli is killed before shutdown hook is installed
+					if (!runOptions.getDetachedMode() && runOptions.isShutdownOnAttachedExit()) {
+						shutdownHook = ShutdownHookUtil.addShutdownHook(client::shutDownCluster, client.getClass().getSimpleName(), LOG);
+					} else {
+						shutdownHook = null;
+					}
 				}
 
 				try {
@@ -286,8 +296,11 @@ protected void run(String[] args) throws Exception {
 						} catch (final Exception e) {
 							LOG.info("Could not properly terminate the Flink cluster.", e);
 						}
+						if (shutdownHook != null) {
+							// we do not need the hook anymore as we have just tried to shutdown the cluster.
+							ShutdownHookUtil.removeShutdownHook(shutdownHook, client.getClass().getSimpleName(), LOG);
+						}
 					}
-
 					try {
 						client.shutdown();
 					} catch (Exception e) {
@@ -492,11 +505,10 @@ private static void printJobStatusMessages(List<JobStatusMessage> jobs) {
 		jobsByState.entrySet().stream()
 			.sorted(statusComparator)
 			.map(Map.Entry::getValue).flatMap(List::stream).sorted(startTimeComparator)
-			.forEachOrdered(job -> {
-			System.out.println(dateFormat.format(new Date(job.getStartTime()))
-				+ " : " + job.getJobId() + " : " + job.getJobName()
-				+ " (" + job.getJobState() + ")");
-		});
+			.forEachOrdered(job ->
+				System.out.println(dateFormat.format(new Date(job.getStartTime()))
+					+ " : " + job.getJobId() + " : " + job.getJobName()
+					+ " (" + job.getJobState() + ")"));
 	}
 
 	/**
@@ -827,11 +839,8 @@ protected void executeProgram(PackagedProgram program, ClusterClient<?> client,
 	 * Creates a Packaged program from the given command line options.
 	 *
 	 * @return A PackagedProgram (upon success)
-	 * @throws java.io.FileNotFoundException
-	 * @throws org.apache.flink.client.program.ProgramInvocationException
 	 */
-	protected PackagedProgram buildProgram(ProgramOptions options)
-			throws FileNotFoundException, ProgramInvocationException {
+	PackagedProgram buildProgram(ProgramOptions options) throws FileNotFoundException, ProgramInvocationException {
 		String[] programArgs = options.getProgramArgs();
 		String jarFilePath = options.getJarFilePath();
 		List<URL> classpaths = options.getClasspaths();
@@ -1163,7 +1172,7 @@ else if (new File(CONFIG_DIRECTORY_FALLBACK_2).exists()) {
 	 * @param address Address to write to the configuration
 	 * @param config The configuration to write to
 	 */
-	public static void setJobManagerAddressInConfig(Configuration config, InetSocketAddress address) {
+	static void setJobManagerAddressInConfig(Configuration config, InetSocketAddress address) {
 		config.setString(JobManagerOptions.ADDRESS, address.getHostString());
 		config.setInteger(JobManagerOptions.PORT, address.getPort());
 		config.setString(RestOptions.ADDRESS, address.getHostString());
diff --git a/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java b/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
index 0adb8cf75b2..8eb0dd6774e 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
@@ -19,6 +19,7 @@
 package org.apache.flink.client.cli;
 
 import org.apache.flink.configuration.CheckpointingOptions;
+import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.DefaultParser;
@@ -62,6 +63,11 @@
 	public static final Option DETACHED_OPTION = new Option("d", "detached", false, "If present, runs " +
 			"the job in detached mode");
 
+	public static final Option SHUTDOWN_IF_ATTACHED_OPTION = new Option(
+		"sae", "shutdownOnAttachedExit", false,
+		"If the job is submitted in attached mode, perform a best-effort cluster shutdown " +
+			"when the CLI is terminated abruptly, e.g., in response to a user interrupt, such as typing Ctrl + C.");
+
 	/**
 	 * @deprecated use non-prefixed variant {@link #DETACHED_OPTION} for both YARN and non-YARN deployments
 	 */
@@ -76,10 +82,10 @@
 			"Address of the JobManager (master) to which to connect. " +
 			"Use this flag to connect to a different JobManager than the one specified in the configuration.");
 
-	static final Option SAVEPOINT_PATH_OPTION = new Option("s", "fromSavepoint", true,
+	public static final Option SAVEPOINT_PATH_OPTION = new Option("s", "fromSavepoint", true,
 			"Path to a savepoint to restore the job from (for example hdfs:///flink/savepoint-1537).");
 
-	static final Option SAVEPOINT_ALLOW_NON_RESTORED_OPTION = new Option("n", "allowNonRestoredState", false,
+	public static final Option SAVEPOINT_ALLOW_NON_RESTORED_OPTION = new Option("n", "allowNonRestoredState", false,
 			"Allow to skip savepoint state that cannot be restored. " +
 					"You need to allow this if you removed an operator from your " +
 					"program that was part of the program when the savepoint was triggered.");
@@ -127,6 +133,7 @@
 
 		LOGGING_OPTION.setRequired(false);
 		DETACHED_OPTION.setRequired(false);
+		SHUTDOWN_IF_ATTACHED_OPTION.setRequired(false);
 		YARN_DETACHED_OPTION.setRequired(false);
 
 		ARGS_OPTION.setRequired(false);
@@ -169,6 +176,7 @@ private static Options getProgramSpecificOptions(Options options) {
 		options.addOption(ARGS_OPTION);
 		options.addOption(LOGGING_OPTION);
 		options.addOption(DETACHED_OPTION);
+		options.addOption(SHUTDOWN_IF_ATTACHED_OPTION);
 		options.addOption(YARN_DETACHED_OPTION);
 		return options;
 	}
@@ -179,6 +187,7 @@ private static Options getProgramSpecificOptionsWithoutDeprecatedOptions(Options
 		options.addOption(PARALLELISM_OPTION);
 		options.addOption(LOGGING_OPTION);
 		options.addOption(DETACHED_OPTION);
+		options.addOption(SHUTDOWN_IF_ATTACHED_OPTION);
 		return options;
 	}
 
@@ -401,6 +410,16 @@ private static void printCustomCliOptions(
 		}
 	}
 
+	public static SavepointRestoreSettings createSavepointRestoreSettings(CommandLine commandLine) {
+		if (commandLine.hasOption(SAVEPOINT_PATH_OPTION.getOpt())) {
+			String savepointPath = commandLine.getOptionValue(SAVEPOINT_PATH_OPTION.getOpt());
+			boolean allowNonRestoredState = commandLine.hasOption(SAVEPOINT_ALLOW_NON_RESTORED_OPTION.getOpt());
+			return SavepointRestoreSettings.forPath(savepointPath, allowNonRestoredState);
+		} else {
+			return SavepointRestoreSettings.none();
+		}
+	}
+
 	// --------------------------------------------------------------------------------------------
 	//  Line Parsing
 	// --------------------------------------------------------------------------------------------
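
For illustration, the new `-sae`/`--shutdownOnAttachedExit` option might be combined with an attached submission roughly as follows; the jar path is a placeholder, and the savepoint path reuses the example given for `-s` above.

{% highlight shell %}
# Attached submission: if the CLI is terminated abruptly (e.g. Ctrl + C),
# make a best-effort attempt to shut down the cluster that was deployed for the job
./bin/flink run -sae -s hdfs:///flink/savepoint-1537 ./examples/streaming/WordCount.jar
{% endhighlight %}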
diff --git a/flink-clients/src/main/java/org/apache/flink/client/cli/ProgramOptions.java b/flink-clients/src/main/java/org/apache/flink/client/cli/ProgramOptions.java
index 1acda1b5265..da03d64048c 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/cli/ProgramOptions.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/cli/ProgramOptions.java
@@ -36,8 +36,7 @@
 import static org.apache.flink.client.cli.CliFrontendParser.JAR_OPTION;
 import static org.apache.flink.client.cli.CliFrontendParser.LOGGING_OPTION;
 import static org.apache.flink.client.cli.CliFrontendParser.PARALLELISM_OPTION;
-import static org.apache.flink.client.cli.CliFrontendParser.SAVEPOINT_ALLOW_NON_RESTORED_OPTION;
-import static org.apache.flink.client.cli.CliFrontendParser.SAVEPOINT_PATH_OPTION;
+import static org.apache.flink.client.cli.CliFrontendParser.SHUTDOWN_IF_ATTACHED_OPTION;
 import static org.apache.flink.client.cli.CliFrontendParser.YARN_DETACHED_OPTION;
 
 /**
@@ -59,6 +58,8 @@
 
 	private final boolean detachedMode;
 
+	private final boolean shutdownOnAttachedExit;
+
 	private final SavepointRestoreSettings savepointSettings;
 
 	protected ProgramOptions(CommandLine line) throws CliArgsException {
@@ -115,14 +116,9 @@ else if (args.length > 0) {
 		stdoutLogging = !line.hasOption(LOGGING_OPTION.getOpt());
 		detachedMode = line.hasOption(DETACHED_OPTION.getOpt()) || line.hasOption(
 			YARN_DETACHED_OPTION.getOpt());
+		shutdownOnAttachedExit = line.hasOption(SHUTDOWN_IF_ATTACHED_OPTION.getOpt());
 
-		if (line.hasOption(SAVEPOINT_PATH_OPTION.getOpt())) {
-			String savepointPath = line.getOptionValue(SAVEPOINT_PATH_OPTION.getOpt());
-			boolean allowNonRestoredState = line.hasOption(SAVEPOINT_ALLOW_NON_RESTORED_OPTION.getOpt());
-			this.savepointSettings = SavepointRestoreSettings.forPath(savepointPath, allowNonRestoredState);
-		} else {
-			this.savepointSettings = SavepointRestoreSettings.none();
-		}
+		this.savepointSettings = CliFrontendParser.createSavepointRestoreSettings(line);
 	}
 
 	public String getJarFilePath() {
@@ -153,6 +149,10 @@ public boolean getDetachedMode() {
 		return detachedMode;
 	}
 
+	public boolean isShutdownOnAttachedExit() {
+		return shutdownOnAttachedExit;
+	}
+
 	public SavepointRestoreSettings getSavepointRestoreSettings() {
 		return savepointSettings;
 	}
diff --git a/flink-clients/src/main/java/org/apache/flink/client/deployment/ClusterSpecification.java b/flink-clients/src/main/java/org/apache/flink/client/deployment/ClusterSpecification.java
index 90de955039f..72975d8b9b2 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/deployment/ClusterSpecification.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/deployment/ClusterSpecification.java
@@ -19,8 +19,7 @@
 package org.apache.flink.client.deployment;
 
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.configuration.JobManagerOptions;
-import org.apache.flink.configuration.MemorySize;
+import org.apache.flink.configuration.ConfigurationUtils;
 import org.apache.flink.configuration.TaskManagerOptions;
 
 /**
@@ -68,8 +67,8 @@ public String toString() {
 	public static ClusterSpecification fromConfiguration(Configuration configuration) {
 		int slots = configuration.getInteger(TaskManagerOptions.NUM_TASK_SLOTS, 1);
 
-		int jobManagerMemoryMb = MemorySize.parse(configuration.getString(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY)).getMebiBytes();
-		int taskManagerMemoryMb = MemorySize.parse(configuration.getString(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY)).getMebiBytes();
+		int jobManagerMemoryMb = ConfigurationUtils.getJobManagerHeapMemory(configuration).getMebiBytes();
+		int taskManagerMemoryMb = ConfigurationUtils.getTaskManagerHeapMemory(configuration).getMebiBytes();
 
 		return new ClusterSpecificationBuilder()
 			.setMasterMemoryMB(jobManagerMemoryMb)
diff --git a/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java b/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
index 373267ffc34..521b8104033 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
@@ -627,7 +627,7 @@ public JobListeningContext connectToJob(JobID jobID) throws JobExecutionExceptio
 
 		Future<Object> response = jobManager.ask(JobManagerMessages.getRequestJobStatus(jobId), timeout);
 
-		CompletableFuture<Object> javaFuture = FutureUtils.<Object>toJava(response);
+		CompletableFuture<Object> javaFuture = FutureUtils.toJava(response);
 
 		return javaFuture.thenApply((responseMessage) -> {
 			if (responseMessage instanceof JobManagerMessages.CurrentJobStatus) {
@@ -734,7 +734,7 @@ public void stop(final JobID jobId) throws Exception {
 
 		Future<Object> response = jobManager.ask(new JobManagerMessages.TriggerSavepoint(jobId, Option.<String>apply(savepointDirectory)),
 			new FiniteDuration(1, TimeUnit.HOURS));
-		CompletableFuture<Object> responseFuture = FutureUtils.<Object>toJava(response);
+		CompletableFuture<Object> responseFuture = FutureUtils.toJava(response);
 
 		return responseFuture.thenApply((responseMessage) -> {
 			if (responseMessage instanceof JobManagerMessages.TriggerSavepointSuccess) {
@@ -754,7 +754,7 @@ public void stop(final JobID jobId) throws Exception {
 		final ActorGateway jobManager = getJobManagerGateway();
 
 		Object msg = new JobManagerMessages.DisposeSavepoint(savepointPath);
-		CompletableFuture<Object> responseFuture = FutureUtils.<Object>toJava(
+		CompletableFuture<Object> responseFuture = FutureUtils.toJava(
 			jobManager.ask(
 				msg,
 				timeout));
@@ -793,7 +793,7 @@ public void stop(final JobID jobId) throws Exception {
 		final ActorGateway jobManager = getJobManagerGateway();
 
 		Future<Object> response = jobManager.ask(new RequestJobDetails(true, false), timeout);
-		CompletableFuture<Object> responseFuture = FutureUtils.<Object>toJava(response);
+		CompletableFuture<Object> responseFuture = FutureUtils.toJava(response);
 
 		return responseFuture.thenApply((responseMessage) -> {
 			if (responseMessage instanceof MultipleJobsDetails) {
diff --git a/flink-clients/src/main/java/org/apache/flink/client/program/MiniClusterClient.java b/flink-clients/src/main/java/org/apache/flink/client/program/MiniClusterClient.java
index 81cf784441d..3077f183acb 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/program/MiniClusterClient.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/program/MiniClusterClient.java
@@ -21,6 +21,7 @@
 import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.JobSubmissionResult;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.runtime.client.JobExecutionException;
 import org.apache.flink.runtime.client.JobStatusMessage;
 import org.apache.flink.runtime.clusterframework.messages.GetClusterStatusResponse;
 import org.apache.flink.runtime.executiongraph.AccessExecutionGraph;
@@ -94,8 +95,8 @@ public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader)
 
 			try {
 				return jobResult.toJobExecutionResult(classLoader);
-			} catch (JobResult.WrappedJobException e) {
-				throw new ProgramInvocationException("Job failed", jobGraph.getJobID(), e.getCause());
+			} catch (JobExecutionException e) {
+				throw new ProgramInvocationException("Job failed", jobGraph.getJobID(), e);
 			} catch (IOException | ClassNotFoundException e) {
 				throw new ProgramInvocationException("Job failed", jobGraph.getJobID(), e);
 			}
diff --git a/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgramUtils.java b/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgramUtils.java
index 94fc109c47b..59ab4065804 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgramUtils.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgramUtils.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.client.program;
 
+import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.Plan;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.fs.Path;
@@ -39,18 +40,21 @@
 public class PackagedProgramUtils {
 
 	/**
-	 * Creates a {@link JobGraph} from the given {@link PackagedProgram}.
+	 * Creates a {@link JobGraph} with a specified {@link JobID}
+	 * from the given {@link PackagedProgram}.
 	 *
 	 * @param packagedProgram to extract the JobGraph from
 	 * @param configuration to use for the optimizer and job graph generator
 	 * @param defaultParallelism for the JobGraph
+	 * @param jobID the pre-generated job id
 	 * @return JobGraph extracted from the PackagedProgram
 	 * @throws ProgramInvocationException if the JobGraph generation failed
 	 */
 	public static JobGraph createJobGraph(
 			PackagedProgram packagedProgram,
 			Configuration configuration,
-			int defaultParallelism) throws ProgramInvocationException {
+			int defaultParallelism,
+			JobID jobID) throws ProgramInvocationException {
 		Thread.currentThread().setContextClassLoader(packagedProgram.getUserCodeClassLoader());
 		final Optimizer optimizer = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), configuration);
 		final FlinkPlan flinkPlan;
@@ -79,11 +83,11 @@ public static JobGraph createJobGraph(
 		final JobGraph jobGraph;
 
 		if (flinkPlan instanceof StreamingPlan) {
-			jobGraph = ((StreamingPlan) flinkPlan).getJobGraph();
+			jobGraph = ((StreamingPlan) flinkPlan).getJobGraph(jobID);
 			jobGraph.setSavepointRestoreSettings(packagedProgram.getSavepointSettings());
 		} else {
 			final JobGraphGenerator jobGraphGenerator = new JobGraphGenerator(configuration);
-			jobGraph = jobGraphGenerator.compileJobGraph((OptimizedPlan) flinkPlan);
+			jobGraph = jobGraphGenerator.compileJobGraph((OptimizedPlan) flinkPlan, jobID);
 		}
 
 		for (URL url : packagedProgram.getAllLibraries()) {
@@ -99,5 +103,22 @@ public static JobGraph createJobGraph(
 		return jobGraph;
 	}
 
+	/**
+	 * Creates a {@link JobGraph} with a random {@link JobID}
+	 * from the given {@link PackagedProgram}.
+	 *
+	 * @param packagedProgram to extract the JobGraph from
+	 * @param configuration to use for the optimizer and job graph generator
+	 * @param defaultParallelism for the JobGraph
+	 * @return JobGraph extracted from the PackagedProgram
+	 * @throws ProgramInvocationException if the JobGraph generation failed
+	 */
+	public static JobGraph createJobGraph(
+		PackagedProgram packagedProgram,
+		Configuration configuration,
+		int defaultParallelism) throws ProgramInvocationException {
+		return createJobGraph(packagedProgram, configuration, defaultParallelism, null);
+	}
+
 	private PackagedProgramUtils() {}
 }
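
For illustration, the new overload might be used roughly as follows to attach a pre-generated job id to the resulting `JobGraph`; the jar path and parallelism are placeholders.

{% highlight java %}
import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.PackagedProgram;
import org.apache.flink.client.program.PackagedProgramUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.JobGraph;

import java.io.File;

public class CreateJobGraphExample {

	public static void main(String[] args) throws Exception {
		// placeholder user jar and parallelism
		PackagedProgram program = new PackagedProgram(new File("/path/to/user-job.jar"));
		JobID preGeneratedJobId = new JobID();

		// new overload: the resulting JobGraph carries the pre-generated job id
		JobGraph jobGraph = PackagedProgramUtils.createJobGraph(
			program, new Configuration(), 4, preGeneratedJobId);

		System.out.println("Created job graph with id " + jobGraph.getJobID());
	}
}
{% endhighlight %}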
diff --git a/flink-clients/src/main/java/org/apache/flink/client/program/rest/RestClusterClient.java b/flink-clients/src/main/java/org/apache/flink/client/program/rest/RestClusterClient.java
index 935a07faf89..c6dc37ef7fe 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/program/rest/RestClusterClient.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/program/rest/RestClusterClient.java
@@ -32,6 +32,7 @@
 import org.apache.flink.client.program.rest.retry.WaitStrategy;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.fs.Path;
+import org.apache.flink.runtime.client.JobExecutionException;
 import org.apache.flink.runtime.client.JobStatusMessage;
 import org.apache.flink.runtime.client.JobSubmissionException;
 import org.apache.flink.runtime.clusterframework.messages.GetClusterStatusResponse;
@@ -263,8 +264,8 @@ public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader)
 			try {
 				this.lastJobExecutionResult = jobResult.toJobExecutionResult(classLoader);
 				return lastJobExecutionResult;
-			} catch (JobResult.WrappedJobException we) {
-				throw new ProgramInvocationException("Job failed.", jobGraph.getJobID(), we.getCause());
+			} catch (JobExecutionException e) {
+				throw new ProgramInvocationException("Job failed.", jobGraph.getJobID(), e);
 			} catch (IOException | ClassNotFoundException e) {
 				throw new ProgramInvocationException("Job failed.", jobGraph.getJobID(), e);
 			}
@@ -376,7 +377,7 @@ public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader)
 				(JobSubmitResponseBody jobSubmitResponseBody) -> new JobSubmissionResult(jobGraph.getJobID()))
 			.exceptionally(
 				(Throwable throwable) -> {
-					throw new CompletionException(new JobSubmissionException(jobGraph.getJobID(), "Failed to submit JobGraph.", throwable));
+					throw new CompletionException(new JobSubmissionException(jobGraph.getJobID(), "Failed to submit JobGraph.", ExceptionUtils.stripCompletionException(throwable)));
 				});
 	}
 
diff --git a/flink-clients/src/test/java/org/apache/flink/client/program/rest/RestClusterClientTest.java b/flink-clients/src/test/java/org/apache/flink/client/program/rest/RestClusterClientTest.java
index 75f16c03330..abe59d38bb6 100644
--- a/flink-clients/src/test/java/org/apache/flink/client/program/rest/RestClusterClientTest.java
+++ b/flink-clients/src/test/java/org/apache/flink/client/program/rest/RestClusterClientTest.java
@@ -31,6 +31,7 @@
 import org.apache.flink.configuration.JobManagerOptions;
 import org.apache.flink.configuration.RestOptions;
 import org.apache.flink.runtime.client.JobStatusMessage;
+import org.apache.flink.runtime.clusterframework.ApplicationStatus;
 import org.apache.flink.runtime.concurrent.FutureUtils;
 import org.apache.flink.runtime.dispatcher.Dispatcher;
 import org.apache.flink.runtime.dispatcher.DispatcherGateway;
@@ -122,6 +123,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Queue;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
@@ -229,6 +231,7 @@ public void testJobSubmitCancelStop() throws Exception {
 		TestJobExecutionResultHandler testJobExecutionResultHandler =
 			new TestJobExecutionResultHandler(
 				JobExecutionResultResponseBody.created(new JobResult.Builder()
+					.applicationStatus(ApplicationStatus.SUCCEEDED)
 					.jobId(jobId)
 					.netRuntime(Long.MAX_VALUE)
 					.build()));
@@ -351,11 +354,13 @@ public void testSubmitJobAndWaitForExecutionResult() throws Exception {
 				new RestHandlerException("should trigger retry", HttpResponseStatus.SERVICE_UNAVAILABLE),
 				JobExecutionResultResponseBody.inProgress(),
 				JobExecutionResultResponseBody.created(new JobResult.Builder()
+					.applicationStatus(ApplicationStatus.SUCCEEDED)
 					.jobId(jobId)
 					.netRuntime(Long.MAX_VALUE)
 					.accumulatorResults(Collections.singletonMap("testName", new SerializedValue<>(OptionalFailure.of(1.0))))
 					.build()),
 				JobExecutionResultResponseBody.created(new JobResult.Builder()
+					.applicationStatus(ApplicationStatus.FAILED)
 					.jobId(jobId)
 					.netRuntime(Long.MAX_VALUE)
 					.serializedThrowable(new SerializedThrowable(new RuntimeException("expected")))
@@ -385,8 +390,10 @@ public void testSubmitJobAndWaitForExecutionResult() throws Exception {
 				restClusterClient.submitJob(jobGraph, ClassLoader.getSystemClassLoader());
 				fail("Expected exception not thrown.");
 			} catch (final ProgramInvocationException e) {
-				assertThat(e.getCause(), instanceOf(RuntimeException.class));
-				assertThat(e.getCause().getMessage(), equalTo("expected"));
+				final Optional<RuntimeException> cause = ExceptionUtils.findThrowable(e, RuntimeException.class);
+
+				assertThat(cause.isPresent(), is(true));
+				assertThat(cause.get().getMessage(), equalTo("expected"));
 			}
 		}
 	}
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
index 2a7a21659e4..d3b774c8428 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
@@ -22,12 +22,13 @@
 
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
-import org.elasticsearch.client.Client;
 
 import javax.annotation.Nullable;
 
+import java.io.IOException;
 import java.io.Serializable;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * An {@link ElasticsearchApiCallBridge} is used to bridge incompatible Elasticsearch Java API calls across different versions.
@@ -37,17 +38,28 @@
  * <p>Implementations are allowed to be stateful. For example, for Elasticsearch 1.x, since connecting via an embedded node
  * is allowed, the call bridge will hold reference to the created embedded node. Each instance of the sink will hold
  * exactly one instance of the call bridge, and state cleanup is performed when the sink is closed.
+ *
+ * @param <C> The type of the Elasticsearch client, which implements {@link AutoCloseable}.
  */
 @Internal
-public interface ElasticsearchApiCallBridge extends Serializable {
+public interface ElasticsearchApiCallBridge<C extends AutoCloseable> extends Serializable {
 
 	/**
-	 * Creates an Elasticsearch {@link Client}.
+	 * Creates an Elasticsearch client implementing {@link AutoCloseable}.
 	 *
 	 * @param clientConfig The configuration to use when constructing the client.
 	 * @return The created client.
 	 */
-	Client createClient(Map<String, String> clientConfig);
+	C createClient(Map<String, String> clientConfig) throws IOException;
+
+	/**
+	 * Creates a {@link BulkProcessor.Builder} for creating the bulk processor.
+	 *
+	 * @param client the Elasticsearch client.
+	 * @param listener the bulk processor listener.
+	 * @return the bulk processor builder.
+	 */
+	BulkProcessor.Builder createBulkProcessorBuilder(C client, BulkProcessor.Listener listener);
 
 	/**
 	 * Extracts the cause of failure of a bulk item action.
@@ -68,9 +80,24 @@ void configureBulkProcessorBackoff(
 		BulkProcessor.Builder builder,
 		@Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy);
 
+	/**
+	 * Creates a {@link RequestIndexer} that works with the {@link BulkProcessor} in a binary compatible way.
+	 */
+	default RequestIndexer createBulkProcessorIndexer(
+			BulkProcessor bulkProcessor,
+			boolean flushOnCheckpoint,
+			AtomicLong numPendingRequestsRef) {
+		return new PreElasticsearch6BulkProcessorIndexer(
+			bulkProcessor,
+			flushOnCheckpoint,
+			numPendingRequestsRef);
+	}
+
 	/**
 	 * Perform any necessary state cleanup.
 	 */
-	void cleanup();
+	default void cleanup() {
+		// nothing to cleanup by default
+	}
 
 }
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
index 9105d9947f2..4d0c00252d2 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
@@ -18,6 +18,7 @@
 package org.apache.flink.streaming.connectors.elasticsearch;
 
 import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.java.utils.ParameterTool;
 import org.apache.flink.configuration.Configuration;
@@ -62,9 +63,10 @@
  * for example, to create a Elasticsearch {@link Client}, handle failed item responses, etc.
  *
  * @param <T> Type of the elements handled by this sink
+ * @param <C> Type of the Elasticsearch client, which implements {@link AutoCloseable}
  */
 @Internal
-public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> implements CheckpointedFunction {
+public abstract class ElasticsearchSinkBase<T, C extends AutoCloseable> extends RichSinkFunction<T> implements CheckpointedFunction {
 
 	private static final long serialVersionUID = -1007596293618451942L;
 
@@ -85,6 +87,7 @@
 	/**
 	 * Used to control whether the retry delay should increase exponentially or remain constant.
 	 */
+	@PublicEvolving
 	public enum FlushBackoffType {
 		CONSTANT,
 		EXPONENTIAL
@@ -135,14 +138,20 @@ public void setDelayMillis(long delayMillis) {
 
 	private final Integer bulkProcessorFlushMaxActions;
 	private final Integer bulkProcessorFlushMaxSizeMb;
-	private final Integer bulkProcessorFlushIntervalMillis;
+	private final Long bulkProcessorFlushIntervalMillis;
 	private final BulkFlushBackoffPolicy bulkProcessorFlushBackoffPolicy;
 
 	// ------------------------------------------------------------------------
 	//  User-facing API and configuration
 	// ------------------------------------------------------------------------
 
-	/** The user specified config map that we forward to Elasticsearch when we create the {@link Client}. */
+	/**
+	 * The config map that contains configuration for the bulk flushing behaviours.
+	 *
+	 * <p>For {@link org.elasticsearch.client.transport.TransportClient} based implementations, this config
+	 * map would also contain Elasticsearch-shipped configuration, and therefore this config map
+	 * would also be forwarded when creating the Elasticsearch client.
+	 */
 	private final Map<String, String> userConfig;
 
 	/** The function that is used to construct multiple {@link ActionRequest ActionRequests} from each incoming element. */
@@ -155,14 +164,14 @@ public void setDelayMillis(long delayMillis) {
 	private boolean flushOnCheckpoint = true;
 
 	/** Provided to the user via the {@link ElasticsearchSinkFunction} to add {@link ActionRequest ActionRequests}. */
-	private transient BulkProcessorIndexer requestIndexer;
+	private transient RequestIndexer requestIndexer;
 
 	// ------------------------------------------------------------------------
 	//  Internals for the Flink Elasticsearch Sink
 	// ------------------------------------------------------------------------
 
 	/** Call bridge for different version-specific. */
-	private final ElasticsearchApiCallBridge callBridge;
+	private final ElasticsearchApiCallBridge<C> callBridge;
 
 	/**
 	 * Number of pending action requests not yet acknowledged by Elasticsearch.
@@ -176,7 +185,7 @@ public void setDelayMillis(long delayMillis) {
 	private AtomicLong numPendingRequests = new AtomicLong(0);
 
 	/** Elasticsearch client created using the call bridge. */
-	private transient Client client;
+	private transient C client;
 
 	/** Bulk processor to buffer and send requests to Elasticsearch, created using the client. */
 	private transient BulkProcessor bulkProcessor;
@@ -237,7 +246,7 @@ public ElasticsearchSinkBase(
 		}
 
 		if (params.has(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS)) {
-			bulkProcessorFlushIntervalMillis = params.getInt(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS);
+			bulkProcessorFlushIntervalMillis = params.getLong(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS);
 			userConfig.remove(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS);
 		} else {
 			bulkProcessorFlushIntervalMillis = null;
@@ -286,7 +295,7 @@ public void disableFlushOnCheckpoint() {
 	public void open(Configuration parameters) throws Exception {
 		client = callBridge.createClient(userConfig);
 		bulkProcessor = buildBulkProcessor(new BulkProcessorListener());
-		requestIndexer = new BulkProcessorIndexer(bulkProcessor, flushOnCheckpoint, numPendingRequests);
+		requestIndexer = callBridge.createBulkProcessorIndexer(bulkProcessor, flushOnCheckpoint, numPendingRequests);
 	}
 
 	@Override
@@ -341,7 +350,7 @@ public void close() throws Exception {
 	protected BulkProcessor buildBulkProcessor(BulkProcessor.Listener listener) {
 		checkNotNull(listener);
 
-		BulkProcessor.Builder bulkProcessorBuilder = BulkProcessor.builder(client, listener);
+		BulkProcessor.Builder bulkProcessorBuilder = callBridge.createBulkProcessorBuilder(client, listener);
 
 		// This makes flush() blocking
 		bulkProcessorBuilder.setConcurrentRequests(0);
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/PreElasticsearch6BulkProcessorIndexer.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/PreElasticsearch6BulkProcessorIndexer.java
new file mode 100644
index 00000000000..85f4b9a3ea1
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/PreElasticsearch6BulkProcessorIndexer.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.elasticsearch;
+
+import org.apache.flink.annotation.Internal;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.bulk.BulkProcessor;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}.
+ * {@link ActionRequest ActionRequests} will be buffered before sending a bulk request to the Elasticsearch cluster.
+ *
+ * @deprecated This class is not binary compatible with newer Elasticsearch 6+ versions
+ *             (specifically, the {@link #add(UpdateRequest...)} method). However, this module is currently
+ *             compiled against a very old Elasticsearch version.
+ */
+@Deprecated
+@Internal
+class PreElasticsearch6BulkProcessorIndexer implements RequestIndexer {
+
+	private final BulkProcessor bulkProcessor;
+	private final boolean flushOnCheckpoint;
+	private final AtomicLong numPendingRequestsRef;
+
+	PreElasticsearch6BulkProcessorIndexer(BulkProcessor bulkProcessor, boolean flushOnCheckpoint, AtomicLong numPendingRequestsRef) {
+		this.bulkProcessor = checkNotNull(bulkProcessor);
+		this.flushOnCheckpoint = flushOnCheckpoint;
+		this.numPendingRequestsRef = checkNotNull(numPendingRequestsRef);
+	}
+
+	@Override
+	public void add(DeleteRequest... deleteRequests) {
+		for (DeleteRequest deleteRequest : deleteRequests) {
+			if (flushOnCheckpoint) {
+				numPendingRequestsRef.getAndIncrement();
+			}
+			this.bulkProcessor.add(deleteRequest);
+		}
+	}
+
+	@Override
+	public void add(IndexRequest... indexRequests) {
+		for (IndexRequest indexRequest : indexRequests) {
+			if (flushOnCheckpoint) {
+				numPendingRequestsRef.getAndIncrement();
+			}
+			this.bulkProcessor.add(indexRequest);
+		}
+	}
+
+	@Override
+	public void add(UpdateRequest... updateRequests) {
+		for (UpdateRequest updateRequest : updateRequests) {
+			if (flushOnCheckpoint) {
+				numPendingRequestsRef.getAndIncrement();
+			}
+			this.bulkProcessor.add(updateRequest);
+		}
+	}
+}
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java
index 2a1b29736b6..3dc8f879641 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/RequestIndexer.java
@@ -21,9 +21,12 @@
 import org.apache.flink.annotation.PublicEvolving;
 
 import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
 
 /**
- * Users add multiple {@link ActionRequest ActionRequests} to a {@link RequestIndexer} to prepare
+ * Users add multiple delete, index or update requests to a {@link RequestIndexer} to prepare
  * them for sending to an Elasticsearch cluster.
  */
 @PublicEvolving
@@ -33,6 +36,41 @@
 	 * Add multiple {@link ActionRequest} to the indexer to prepare for sending requests to Elasticsearch.
 	 *
 	 * @param actionRequests The multiple {@link ActionRequest} to add.
+	 * @deprecated use the {@link DeleteRequest}, {@link IndexRequest} or {@link UpdateRequest} variants instead
 	 */
-	void add(ActionRequest... actionRequests);
+	@Deprecated
+	default void add(ActionRequest... actionRequests) {
+		for (ActionRequest actionRequest : actionRequests) {
+			if (actionRequest instanceof IndexRequest) {
+				add((IndexRequest) actionRequest);
+			} else if (actionRequest instanceof DeleteRequest) {
+				add((DeleteRequest) actionRequest);
+			} else if (actionRequest instanceof UpdateRequest) {
+				add((UpdateRequest) actionRequest);
+			} else {
+				throw new IllegalArgumentException("RequestIndexer only supports Index, Delete and Update requests");
+			}
+		}
+	}
+
+	/**
+	 * Add multiple {@link DeleteRequest} to the indexer to prepare for sending requests to Elasticsearch.
+	 *
+	 * @param deleteRequests The multiple {@link DeleteRequest} to add.
+	 */
+	void add(DeleteRequest... deleteRequests);
+
+	/**
+	 * Add multiple {@link IndexRequest} to the indexer to prepare for sending requests to Elasticsearch.
+	 *
+	 * @param indexRequests The multiple {@link IndexRequest} to add.
+	 */
+	void add(IndexRequest... indexRequests);
+
+	/**
+	 * Add multiple {@link UpdateRequest} to the indexer to prepare for sending requests to Elasticsearch.
+	 *
+	 * @param updateRequests The multiple {@link UpdateRequest} to add.
+	 */
+	void add(UpdateRequest... updateRequests);
 }
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java
index 09d8806b963..369d26a735a 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.Requests;
 import org.junit.Assert;
@@ -92,7 +93,7 @@ public void testItemFailureRethrownOnInvoke() throws Throwable {
 		// setup the next bulk request, and its mock item failures
 		sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList(new Exception("artificial failure for record")));
 		testHarness.processElement(new StreamRecord<>("msg"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		// manually execute the next bulk request
 		sink.manualBulkRequestWithAllPendingRequests();
@@ -124,7 +125,7 @@ public void testItemFailureRethrownOnCheckpoint() throws Throwable {
 		// setup the next bulk request, and its mock item failures
 		sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList(new Exception("artificial failure for record")));
 		testHarness.processElement(new StreamRecord<>("msg"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		// manually execute the next bulk request
 		sink.manualBulkRequestWithAllPendingRequests();
@@ -164,7 +165,7 @@ public void testItemFailureRethrownOnCheckpointAfterFlush() throws Throwable {
 		sink.setMockItemFailuresListForNextBulkItemResponses(mockResponsesList);
 
 		testHarness.processElement(new StreamRecord<>("msg-1"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		// manually execute the next bulk request (1 request only, thus should succeed)
 		sink.manualBulkRequestWithAllPendingRequests();
@@ -172,7 +173,7 @@ public void testItemFailureRethrownOnCheckpointAfterFlush() throws Throwable {
 		// setup the requests to be flushed in the snapshot
 		testHarness.processElement(new StreamRecord<>("msg-2"));
 		testHarness.processElement(new StreamRecord<>("msg-3"));
-		verify(sink.getMockBulkProcessor(), times(3)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));
 
 		CheckedThread snapshotThread = new CheckedThread() {
 			@Override
@@ -217,7 +218,7 @@ public void testBulkFailureRethrownOnInvoke() throws Throwable {
 		// setup the next bulk request, and let the whole bulk request fail
 		sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));
 		testHarness.processElement(new StreamRecord<>("msg"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		// manually execute the next bulk request
 		sink.manualBulkRequestWithAllPendingRequests();
@@ -249,7 +250,7 @@ public void testBulkFailureRethrownOnCheckpoint() throws Throwable {
 		// setup the next bulk request, and let the whole bulk request fail
 		sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));
 		testHarness.processElement(new StreamRecord<>("msg"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		// manually execute the next bulk request
 		sink.manualBulkRequestWithAllPendingRequests();
@@ -284,7 +285,7 @@ public void testBulkFailureRethrownOnOnCheckpointAfterFlush() throws Throwable {
 		// setup the next bulk request, and let bulk request succeed
 		sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList((Exception) null));
 		testHarness.processElement(new StreamRecord<>("msg-1"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		// manually execute the next bulk request
 		sink.manualBulkRequestWithAllPendingRequests();
@@ -292,7 +293,7 @@ public void testBulkFailureRethrownOnOnCheckpointAfterFlush() throws Throwable {
 		// setup the requests to be flushed in the snapshot
 		testHarness.processElement(new StreamRecord<>("msg-2"));
 		testHarness.processElement(new StreamRecord<>("msg-3"));
-		verify(sink.getMockBulkProcessor(), times(3)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));
 
 		CheckedThread snapshotThread = new CheckedThread() {
 			@Override
@@ -346,7 +347,7 @@ public void testAtLeastOnceSink() throws Throwable {
 		// it contains 1 request, which will fail and re-added to the next bulk request
 		sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList(new Exception("artificial failure for record")));
 		testHarness.processElement(new StreamRecord<>("msg"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		CheckedThread snapshotThread = new CheckedThread() {
 			@Override
@@ -402,7 +403,7 @@ public void testDoesNotWaitForPendingRequestsIfFlushingDisabled() throws Excepti
 		// setup the next bulk request, and let bulk request succeed
 		sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList(new Exception("artificial failure for record")));
 		testHarness.processElement(new StreamRecord<>("msg-1"));
-		verify(sink.getMockBulkProcessor(), times(1)).add(any(ActionRequest.class));
+		verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));
 
 		// the snapshot should not block even though we haven't flushed the bulk request
 		testHarness.snapshot(1L, 1000L);
@@ -410,7 +411,7 @@ public void testDoesNotWaitForPendingRequestsIfFlushingDisabled() throws Excepti
 		testHarness.close();
 	}
 
-	private static class DummyElasticsearchSink<T> extends ElasticsearchSinkBase<T> {
+	private static class DummyElasticsearchSink<T> extends ElasticsearchSinkBase<T, Client> {
 
 		private static final long serialVersionUID = 5051907841570096991L;
 
@@ -478,11 +479,11 @@ public BulkProcessor getMockBulkProcessor() {
 		protected BulkProcessor buildBulkProcessor(final BulkProcessor.Listener listener) {
 			this.mockBulkProcessor = mock(BulkProcessor.class);
 
-			when(mockBulkProcessor.add(any(ActionRequest.class))).thenAnswer(new Answer<Object>() {
+			when(mockBulkProcessor.add(any(IndexRequest.class))).thenAnswer(new Answer<Object>() {
 				@Override
 				public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
 					// intercept the request and add it to our mock bulk request
-					nextBulkRequest.add(invocationOnMock.getArgumentAt(0, ActionRequest.class));
+					nextBulkRequest.add(invocationOnMock.getArgumentAt(0, IndexRequest.class));
 
 					return null;
 				}
@@ -530,7 +531,7 @@ public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
 		}
 	}
 
-	private static class DummyElasticsearchApiCallBridge implements ElasticsearchApiCallBridge {
+	private static class DummyElasticsearchApiCallBridge implements ElasticsearchApiCallBridge<Client> {
 
 		private static final long serialVersionUID = -4272760730959041699L;
 
@@ -539,6 +540,11 @@ public Client createClient(Map<String, String> clientConfig) {
 			return mock(Client.class);
 		}
 
+		@Override
+		public BulkProcessor.Builder createBulkProcessorBuilder(Client client, BulkProcessor.Listener listener) {
+			return null;
+		}
+
 		@Nullable
 		@Override
 		public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
@@ -553,11 +559,6 @@ public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkIt
 		public void configureBulkProcessorBackoff(BulkProcessor.Builder builder, @Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy) {
 			// no need for this in the test cases here
 		}
-
-		@Override
-		public void cleanup() {
-			// nothing to cleanup
-		}
 	}
 
 	private static class SimpleSinkFunction<String> implements ElasticsearchSinkFunction<String> {
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java
index df3779b1fd4..819ffba5d2a 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java
@@ -26,7 +26,6 @@
 import org.apache.flink.util.InstantiationUtil;
 
 import org.elasticsearch.client.Client;
-import org.elasticsearch.client.transport.TransportClient;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -34,19 +33,20 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.net.InetSocketAddress;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
  * Environment preparation and suite of tests for version-specific {@link ElasticsearchSinkBase} implementations.
+ *
+ * @param <C> Elasticsearch client type
+ * @param <A> The address type to use
  */
-public abstract class ElasticsearchSinkTestBase extends AbstractTestBase {
+public abstract class ElasticsearchSinkTestBase<C extends AutoCloseable, A> extends AbstractTestBase {
 
 	private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchSinkTestBase.class);
 
@@ -85,24 +85,21 @@ public static void shutdown() throws Exception {
 	}
 
 	/**
-	 * Tests that the Elasticsearch sink works properly using a {@link TransportClient}.
+	 * Tests that the Elasticsearch sink works properly.
 	 */
-	public void runTransportClientTest() throws Exception {
-		final String index = "transport-client-test-index";
+	public void runElasticsearchSinkTest() throws Exception {
+		final String index = "elasticsearch-sink-test-index";
 
 		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
 
 		DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());
 
-		Map<String, String> userConfig = new HashMap<>();
-		// This instructs the sink to emit after every element, otherwise they would be buffered
-		userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
-		userConfig.put("cluster.name", CLUSTER_NAME);
-
 		source.addSink(createElasticsearchSinkForEmbeddedNode(
-			userConfig, new SourceSinkDataTestKit.TestElasticsearchSinkFunction(index)));
+				1,
+				CLUSTER_NAME,
+				new SourceSinkDataTestKit.TestElasticsearchSinkFunction(index)));
 
-		env.execute("Elasticsearch TransportClient Test");
+		env.execute("Elasticsearch Sink Test");
 
 		// verify the results
 		Client client = embeddedNodeEnv.getClient();
@@ -112,16 +109,20 @@ public void runTransportClientTest() throws Exception {
 	}
 
 	/**
-	 * Tests that the Elasticsearch sink fails eagerly if the provided list of transport addresses is {@code null}.
+	 * Tests that the Elasticsearch sink fails eagerly if the provided list of addresses is {@code null}.
 	 */
-	public void runNullTransportClientTest() throws Exception {
+	public void runNullAddressesTest() throws Exception {
 		Map<String, String> userConfig = new HashMap<>();
 		userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
-		userConfig.put("cluster.name", "my-transport-client-cluster");
+		userConfig.put("cluster.name", CLUSTER_NAME);
 
 		try {
-			createElasticsearchSink(userConfig, null, new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"));
-		} catch (IllegalArgumentException expectedException) {
+			createElasticsearchSink(
+					1,
+					CLUSTER_NAME,
+					null,
+					new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"));
+		} catch (IllegalArgumentException | NullPointerException expectedException) {
 			// test passes
 			return;
 		}
@@ -130,18 +131,19 @@ public void runNullTransportClientTest() throws Exception {
 	}
 
 	/**
-	 * Tests that the Elasticsearch sink fails eagerly if the provided list of transport addresses is empty.
+	 * Tests that the Elasticsearch sink fails eagerly if the provided list of addresses is empty.
 	 */
-	public void runEmptyTransportClientTest() throws Exception {
+	public void runEmptyAddressesTest() throws Exception {
 		Map<String, String> userConfig = new HashMap<>();
 		userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
-		userConfig.put("cluster.name", "my-transport-client-cluster");
+		userConfig.put("cluster.name", CLUSTER_NAME);
 
 		try {
 			createElasticsearchSink(
-				userConfig,
-				Collections.<InetSocketAddress>emptyList(),
-				new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"));
+					1,
+					CLUSTER_NAME,
+					Collections.emptyList(),
+					new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"));
 		} catch (IllegalArgumentException expectedException) {
 			// test passes
 			return;
@@ -153,39 +155,66 @@ public void runEmptyTransportClientTest() throws Exception {
 	/**
 	 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 	 */
-	public void runTransportClientFailsTest() throws Exception {
+	public void runInvalidElasticsearchClusterTest() throws Exception {
 		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
 
 		DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());
 
 		Map<String, String> userConfig = new HashMap<>();
 		userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
-		userConfig.put("cluster.name", "my-transport-client-cluster");
+		userConfig.put("cluster.name", "invalid-cluster-name");
 
-		source.addSink(createElasticsearchSinkForEmbeddedNode(
-			Collections.unmodifiableMap(userConfig), new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test")));
+		source.addSink(createElasticsearchSinkForNode(
+				1,
+				"invalid-cluster-name",
+				new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test"),
+				"123.123.123.123")); // incorrect ip address
 
 		try {
-			env.execute("Elasticsearch Transport Client Test");
+			env.execute("Elasticsearch Sink Test");
 		} catch (JobExecutionException expectedException) {
-			assertTrue(expectedException.getCause().getMessage().contains("not connected to any Elasticsearch nodes"));
+			// test passes
 			return;
 		}
 
 		fail();
 	}
 
+	/**
+	 * Utility method to create a user config map.
+	 */
+	protected Map<String, String> createUserConfig(int bulkFlushMaxActions, String clusterName) {
+		Map<String, String> userConfig = new HashMap<>();
+		userConfig.put("cluster.name", clusterName);
+		userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, String.valueOf(bulkFlushMaxActions));
+
+		return userConfig;
+	}
+
 	/** Creates a version-specific Elasticsearch sink, using arbitrary transport addresses. */
-	protected abstract <T> ElasticsearchSinkBase<T> createElasticsearchSink(Map<String, String> userConfig,
-																			List<InetSocketAddress> transportAddresses,
-																			ElasticsearchSinkFunction<T> elasticsearchSinkFunction);
+	protected abstract ElasticsearchSinkBase<Tuple2<Integer, String>, C> createElasticsearchSink(
+			int bulkFlushMaxActions,
+			String clusterName,
+			List<A> addresses,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction);
 
 	/**
 	 * Creates a version-specific Elasticsearch sink to connect to a local embedded Elasticsearch node.
 	 *
-	 * <p>This case is singled out from {@link ElasticsearchSinkTestBase#createElasticsearchSink(Map, List, ElasticsearchSinkFunction)}
+	 * <p>This case is singled out from {@link ElasticsearchSinkTestBase#createElasticsearchSink(int, String, List, ElasticsearchSinkFunction)}
 	 * because the Elasticsearch Java API to do so is incompatible across different versions.
 	 */
-	protected abstract <T> ElasticsearchSinkBase<T> createElasticsearchSinkForEmbeddedNode(
-		Map<String, String> userConfig, ElasticsearchSinkFunction<T> elasticsearchSinkFunction) throws Exception;
+	protected abstract ElasticsearchSinkBase<Tuple2<Integer, String>, C> createElasticsearchSinkForEmbeddedNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) throws Exception;
+
+	/**
+	 * Creates a version-specific Elasticsearch sink to connect to a specific Elasticsearch node.
+	 */
+	protected abstract ElasticsearchSinkBase<Tuple2<Integer, String>, C> createElasticsearchSinkForNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
+			String ipAddress) throws Exception;
 }
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java
index ea6e7a3ac70..fd14ba36370 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironment.java
@@ -29,7 +29,7 @@
  *       also be located under the same package. The intentional package-private accessibility of this interface
  *       enforces that.
  */
-interface EmbeddedElasticsearchNodeEnvironment {
+public interface EmbeddedElasticsearchNodeEnvironment {
 
 	/**
 	 * Start an embedded Elasticsearch node instance.
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
index 2a3c2a06460..4f1cd086d8f 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/Elasticsearch1ApiCallBridge.java
@@ -42,7 +42,7 @@
  * Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 1.x.
  */
 @Internal
-public class Elasticsearch1ApiCallBridge implements ElasticsearchApiCallBridge {
+public class Elasticsearch1ApiCallBridge implements ElasticsearchApiCallBridge<Client> {
 
 	private static final long serialVersionUID = -2632363720584123682L;
 
@@ -115,6 +115,11 @@ public Client createClient(Map<String, String> clientConfig) {
 		}
 	}
 
+	@Override
+	public BulkProcessor.Builder createBulkProcessorBuilder(Client client, BulkProcessor.Listener listener) {
+		return BulkProcessor.builder(client, listener);
+	}
+
 	@Override
 	public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
 		if (!bulkItemResponse.isFailed()) {
diff --git a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
index e8eccd978f4..d5e1d1fdc12 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSink.java
@@ -23,6 +23,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.node.Node;
@@ -64,7 +65,7 @@
  * @param <T> Type of the elements handled by this sink
  */
 @PublicEvolving
-public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T> {
+public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T, Client> {
 
 	private static final long serialVersionUID = 1L;
 
diff --git a/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java b/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java
index 54892909abc..2f1a65c58ee 100644
--- a/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java
+++ b/flink-connectors/flink-connector-elasticsearch/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkITCase.java
@@ -28,10 +28,12 @@
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.junit.Test;
 
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -42,26 +44,26 @@
 /**
  * IT Cases for the {@link ElasticsearchSink}.
  */
-public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase {
+public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase<Client, InetSocketAddress> {
 
 	@Test
-	public void testTransportClient() throws Exception {
-		runTransportClientTest();
+	public void testElasticsearchSink() throws Exception {
+		runElasticsearchSinkTest();
 	}
 
 	@Test
-	public void testNullTransportClient() throws Exception {
-		runNullTransportClientTest();
+	public void testNullAddresses() throws Exception {
+		runNullAddressesTest();
 	}
 
 	@Test
-	public void testEmptyTransportClient() throws Exception {
-		runEmptyTransportClientTest();
+	public void testEmptyAddresses() throws Exception {
+		runEmptyAddressesTest();
 	}
 
 	@Test
-	public void testTransportClientFails() throws Exception{
-		runTransportClientFailsTest();
+	public void testInvalidElasticsearchCluster() throws Exception {
+		runInvalidElasticsearchClusterTest();
 	}
 
 	// -- Tests specific to Elasticsearch 1.x --
@@ -102,19 +104,28 @@ public void testDeprecatedIndexRequestBuilderVariant() throws Exception {
 	}
 
 	@Override
-	protected <T> ElasticsearchSinkBase<T> createElasticsearchSink(Map<String, String> userConfig,
-																List<InetSocketAddress> transportAddresses,
-																ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {
-		return new ElasticsearchSink<>(userConfig, ElasticsearchUtils.convertInetSocketAddresses(transportAddresses), elasticsearchSinkFunction);
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, Client> createElasticsearchSink(
+			int bulkFlushMaxActions,
+			String clusterName,
+			List<InetSocketAddress> transportAddresses,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
+
+		return new ElasticsearchSink<>(
+				Collections.unmodifiableMap(createUserConfig(bulkFlushMaxActions, clusterName)),
+				ElasticsearchUtils.convertInetSocketAddresses(transportAddresses),
+				elasticsearchSinkFunction);
 	}
 
 	@Override
-	protected <T> ElasticsearchSinkBase<T> createElasticsearchSinkForEmbeddedNode(
-		Map<String, String> userConfig, ElasticsearchSinkFunction<T> elasticsearchSinkFunction) throws Exception {
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, Client> createElasticsearchSinkForEmbeddedNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) throws Exception {
+
+		Map<String, String> userConfig = createUserConfig(bulkFlushMaxActions, clusterName);
 
 		// Elasticsearch 1.x requires this setting when using
 		// LocalTransportAddress to connect to a local embedded node
-		userConfig = new HashMap<>(userConfig);
 		userConfig.put("node.local", "true");
 
 		List<TransportAddress> transports = new ArrayList<>();
@@ -126,6 +137,22 @@ public void testDeprecatedIndexRequestBuilderVariant() throws Exception {
 			elasticsearchSinkFunction);
 	}
 
+	@Override
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, Client> createElasticsearchSinkForNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
+			String ipAddress) throws Exception {
+
+		List<TransportAddress> transports = new ArrayList<>();
+		transports.add(new InetSocketTransportAddress(InetAddress.getByName(ipAddress), 9300));
+
+		return new ElasticsearchSink<>(
+			Collections.unmodifiableMap(createUserConfig(bulkFlushMaxActions, clusterName)),
+			transports,
+			elasticsearchSinkFunction);
+	}
+
 	/**
 	 * A {@link IndexRequestBuilder} with equivalent functionality to {@link SourceSinkDataTestKit.TestElasticsearchSinkFunction}.
 	 */
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
index 390a4078e2b..73a69ebde34 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/Elasticsearch2ApiCallBridge.java
@@ -26,7 +26,6 @@
 import org.elasticsearch.action.bulk.BackoffPolicy;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
-import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
@@ -44,7 +43,7 @@
  * Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 2.x.
  */
 @Internal
-public class Elasticsearch2ApiCallBridge implements ElasticsearchApiCallBridge {
+public class Elasticsearch2ApiCallBridge implements ElasticsearchApiCallBridge<TransportClient> {
 
 	private static final long serialVersionUID = 2638252694744361079L;
 
@@ -63,7 +62,7 @@
 	}
 
 	@Override
-	public Client createClient(Map<String, String> clientConfig) {
+	public TransportClient createClient(Map<String, String> clientConfig) {
 		Settings settings = Settings.settingsBuilder().put(clientConfig).build();
 
 		TransportClient transportClient = TransportClient.builder().settings(settings).build();
@@ -83,6 +82,11 @@ public Client createClient(Map<String, String> clientConfig) {
 		return transportClient;
 	}
 
+	@Override
+	public BulkProcessor.Builder createBulkProcessorBuilder(TransportClient client, BulkProcessor.Listener listener) {
+		return BulkProcessor.builder(client, listener);
+	}
+
 	@Override
 	public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
 		if (!bulkItemResponse.isFailed()) {
@@ -117,10 +121,4 @@ public void configureBulkProcessorBackoff(
 
 		builder.setBackoffPolicy(backoffPolicy);
 	}
-
-	@Override
-	public void cleanup() {
-		// nothing to cleanup
-	}
-
 }
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
index ffccacf40ac..a911905ac0a 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/main/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSink.java
@@ -58,7 +58,7 @@
  * @param <T> Type of the elements handled by this sink
  */
 @PublicEvolving
-public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T> {
+public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T, TransportClient> {
 
 	private static final long serialVersionUID = 1L;
 
diff --git a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java
index 7ded893be3a..7887e72fa10 100644
--- a/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java
+++ b/flink-connectors/flink-connector-elasticsearch2/src/test/java/org/apache/flink/streaming/connectors/elasticsearch2/ElasticsearchSinkITCase.java
@@ -17,57 +17,81 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch2;
 
+import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkTestBase;
 
+import org.elasticsearch.client.transport.TransportClient;
 import org.junit.Test;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 
 /**
  * IT cases for the {@link ElasticsearchSink}.
  */
-public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase {
+public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase<TransportClient, InetSocketAddress> {
 
 	@Test
-	public void testTransportClient() throws Exception {
-		runTransportClientTest();
+	public void testElasticsearchSink() throws Exception {
+		runElasticsearchSinkTest();
 	}
 
 	@Test
-	public void testNullTransportClient() throws Exception {
-		runNullTransportClientTest();
+	public void testNullAddresses() throws Exception {
+		runNullAddressesTest();
 	}
 
 	@Test
-	public void testEmptyTransportClient() throws Exception {
-		runEmptyTransportClientTest();
+	public void testEmptyAddresses() throws Exception {
+		runEmptyAddressesTest();
 	}
 
 	@Test
-	public void testTransportClientFails() throws Exception{
-		runTransportClientFailsTest();
+	public void testInvalidElasticsearchCluster() throws Exception {
+		runInvalidElasticsearchClusterTest();
 	}
 
 	@Override
-	protected <T> ElasticsearchSinkBase<T> createElasticsearchSink(Map<String, String> userConfig,
-																List<InetSocketAddress> transportAddresses,
-																ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {
-		return new ElasticsearchSink<>(userConfig, transportAddresses, elasticsearchSinkFunction);
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, TransportClient> createElasticsearchSink(
+			int bulkFlushMaxActions,
+			String clusterName,
+			List<InetSocketAddress> transportAddresses,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
+
+		return new ElasticsearchSink<>(
+				Collections.unmodifiableMap(createUserConfig(bulkFlushMaxActions, clusterName)),
+				transportAddresses,
+				elasticsearchSinkFunction);
+	}
+
+	@Override
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, TransportClient> createElasticsearchSinkForEmbeddedNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) throws Exception {
+
+		return createElasticsearchSinkForNode(
+				bulkFlushMaxActions, clusterName, elasticsearchSinkFunction, "127.0.0.1");
 	}
 
 	@Override
-	protected <T> ElasticsearchSinkBase<T> createElasticsearchSinkForEmbeddedNode(
-		Map<String, String> userConfig, ElasticsearchSinkFunction<T> elasticsearchSinkFunction) throws Exception {
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, TransportClient> createElasticsearchSinkForNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
+			String ipAddress) throws Exception {
 
 		List<InetSocketAddress> transports = new ArrayList<>();
-		transports.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));
+		transports.add(new InetSocketAddress(InetAddress.getByName(ipAddress), 9300));
 
-		return new ElasticsearchSink<>(userConfig, transports, elasticsearchSinkFunction);
+		return new ElasticsearchSink<>(
+				Collections.unmodifiableMap(createUserConfig(bulkFlushMaxActions, clusterName)),
+				transports,
+				elasticsearchSinkFunction);
 	}
 }
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
index 7c4ba7a97f1..a3453ec4445 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/Elasticsearch5ApiCallBridge.java
@@ -26,7 +26,6 @@
 import org.elasticsearch.action.bulk.BackoffPolicy;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkProcessor;
-import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
@@ -47,7 +46,7 @@
  * Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 5.x.
  */
 @Internal
-public class Elasticsearch5ApiCallBridge implements ElasticsearchApiCallBridge {
+public class Elasticsearch5ApiCallBridge implements ElasticsearchApiCallBridge<TransportClient> {
 
 	private static final long serialVersionUID = -5222683870097809633L;
 
@@ -66,7 +65,7 @@
 	}
 
 	@Override
-	public Client createClient(Map<String, String> clientConfig) {
+	public TransportClient createClient(Map<String, String> clientConfig) {
 		Settings settings = Settings.builder().put(clientConfig)
 			.put(NetworkModule.HTTP_TYPE_KEY, Netty3Plugin.NETTY_HTTP_TRANSPORT_NAME)
 			.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME)
@@ -89,6 +88,11 @@ public Client createClient(Map<String, String> clientConfig) {
 		return transportClient;
 	}
 
+	@Override
+	public BulkProcessor.Builder createBulkProcessorBuilder(TransportClient client, BulkProcessor.Listener listener) {
+		return BulkProcessor.builder(client, listener);
+	}
+
 	@Override
 	public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
 		if (!bulkItemResponse.isFailed()) {
@@ -123,10 +127,4 @@ public void configureBulkProcessorBackoff(
 
 		builder.setBackoffPolicy(backoffPolicy);
 	}
-
-	@Override
-	public void cleanup() {
-		// nothing to cleanup
-	}
-
 }
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
index 6c09337227a..b99b3539255 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/main/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSink.java
@@ -59,7 +59,7 @@
  * @param <T> Type of the elements handled by this sink
  */
 @PublicEvolving
-public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T> {
+public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T, TransportClient> {
 
 	private static final long serialVersionUID = 1L;
 
diff --git a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java
index ad7c664cac7..67daa409b7b 100644
--- a/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java
+++ b/flink-connectors/flink-connector-elasticsearch5/src/test/java/org/apache/flink/streaming/connectors/elasticsearch5/ElasticsearchSinkITCase.java
@@ -18,58 +18,85 @@
 
 package org.apache.flink.streaming.connectors.elasticsearch5;
 
+import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
 import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkTestBase;
 
+import org.elasticsearch.client.transport.TransportClient;
 import org.junit.Test;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 
 /**
  * IT cases for the {@link ElasticsearchSink}.
+ *
+ * <p>The Elasticsearch ITCases for 5.x CANNOT be executed in the IDE directly, since the
+ * Log4J-to-SLF4J adapter dependency must be excluded from the test classpath for the embedded
+ * Elasticsearch node used in the tests to work properly.
  */
-public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase {
+public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase<TransportClient, InetSocketAddress> {
 
 	@Test
-	public void testTransportClient() throws Exception {
-		runTransportClientTest();
+	public void testElasticsearchSink() throws Exception {
+		runElasticsearchSinkTest();
 	}
 
 	@Test
-	public void testNullTransportClient() throws Exception {
-		runNullTransportClientTest();
+	public void testNullAddresses() throws Exception {
+		runNullAddressesTest();
 	}
 
 	@Test
-	public void testEmptyTransportClient() throws Exception {
-		runEmptyTransportClientTest();
+	public void testEmptyAddresses() throws Exception {
+		runEmptyAddressesTest();
 	}
 
 	@Test
-	public void testTransportClientFails() throws Exception {
-		runTransportClientFailsTest();
+	public void testInvalidElasticsearchCluster() throws Exception {
+		runInvalidElasticsearchClusterTest();
 	}
 
 	@Override
-	protected <T> ElasticsearchSinkBase<T> createElasticsearchSink(Map<String, String> userConfig,
-																List<InetSocketAddress> transportAddresses,
-																ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {
-		return new ElasticsearchSink<>(userConfig, transportAddresses, elasticsearchSinkFunction);
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, TransportClient> createElasticsearchSink(
+			int bulkFlushMaxActions,
+			String clusterName,
+			List<InetSocketAddress> addresses,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
+
+		return new ElasticsearchSink<>(
+				Collections.unmodifiableMap(createUserConfig(bulkFlushMaxActions, clusterName)),
+				addresses,
+				elasticsearchSinkFunction);
 	}
 
 	@Override
-	protected <T> ElasticsearchSinkBase<T> createElasticsearchSinkForEmbeddedNode(
-		Map<String, String> userConfig, ElasticsearchSinkFunction<T> elasticsearchSinkFunction) throws Exception {
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, TransportClient> createElasticsearchSinkForEmbeddedNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) throws Exception {
+
+		return createElasticsearchSinkForNode(
+				bulkFlushMaxActions, clusterName, elasticsearchSinkFunction, "127.0.0.1");
+	}
+
+	@Override
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, TransportClient> createElasticsearchSinkForNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
+			String ipAddress) throws Exception {
 
 		List<InetSocketAddress> transports = new ArrayList<>();
-		transports.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));
+		transports.add(new InetSocketAddress(InetAddress.getByName(ipAddress), 9300));
 
-		return new ElasticsearchSink<>(userConfig, transports, elasticsearchSinkFunction);
+		return new ElasticsearchSink<>(
+				Collections.unmodifiableMap(createUserConfig(bulkFlushMaxActions, clusterName)),
+				transports,
+				elasticsearchSinkFunction);
 	}
-
 }
diff --git a/flink-connectors/flink-connector-elasticsearch6/pom.xml b/flink-connectors/flink-connector-elasticsearch6/pom.xml
new file mode 100644
index 00000000000..4af64cb7048
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch6/pom.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+			xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+			xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+	<modelVersion>4.0.0</modelVersion>
+
+	<parent>
+		<groupId>org.apache.flink</groupId>
+		<artifactId>flink-connectors</artifactId>
+		<version>1.6-SNAPSHOT</version>
+		<relativePath>..</relativePath>
+	</parent>
+
+	<artifactId>flink-connector-elasticsearch6_${scala.binary.version}</artifactId>
+	<name>flink-connector-elasticsearch6</name>
+
+	<packaging>jar</packaging>
+
+	<!-- Allow users to pass custom connector versions -->
+	<properties>
+		<elasticsearch.version>6.3.1</elasticsearch.version>
+	</properties>
+
+	<dependencies>
+
+		<!-- core dependencies -->
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>provided</scope>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-connector-elasticsearch-base_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<exclusions>
+				<!-- Elasticsearch Java Client has been moved to a different module in 5.x -->
+				<exclusion>
+					<groupId>org.elasticsearch</groupId>
+					<artifactId>elasticsearch</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+
+		<!-- Dependency for Elasticsearch 6.x REST Client -->
+		<dependency>
+			<groupId>org.elasticsearch.client</groupId>
+			<artifactId>elasticsearch-rest-high-level-client</artifactId>
+			<version>${elasticsearch.version}</version>
+		</dependency>
+
+		<!--
+			Elasticsearch 5.x and later use Log4j2 and no longer detect logging implementations,
+			making Log4j2 a strict dependency. The following is added so that the Log4j2 API in
+			Elasticsearch is routed to SLF4J. This way, user projects can remain flexible in
+			their preferred logging implementation.
+		-->
+
+		<dependency>
+			<groupId>org.apache.logging.log4j</groupId>
+			<artifactId>log4j-to-slf4j</artifactId>
+			<version>2.9.1</version>
+		</dependency>
+
+		<!-- test dependencies -->
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-test-utils_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+			<type>test-jar</type>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-connector-elasticsearch-base_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<exclusions>
+				<exclusion>
+					<groupId>org.elasticsearch</groupId>
+					<artifactId>elasticsearch</artifactId>
+				</exclusion>
+			</exclusions>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+
+		<!--
+			Including the Elasticsearch transport dependency for tests. Netty 3 is no longer available in 6.x.
+		-->
+
+		<dependency>
+			<groupId>org.elasticsearch.client</groupId>
+			<artifactId>transport</artifactId>
+			<version>${elasticsearch.version}</version>
+			<scope>test</scope>
+		</dependency>
+
+		<dependency>
+			<groupId>org.elasticsearch.plugin</groupId>
+			<artifactId>transport-netty4-client</artifactId>
+			<version>${elasticsearch.version}</version>
+			<scope>test</scope>
+		</dependency>
+
+		<!--
+			Including Log4j2 dependencies for tests is required for the
+			embedded Elasticsearch nodes used in tests to run correctly.
+		-->
+
+		<dependency>
+			<groupId>org.apache.logging.log4j</groupId>
+			<artifactId>log4j-api</artifactId>
+			<version>2.9.1</version>
+			<scope>test</scope>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.logging.log4j</groupId>
+			<artifactId>log4j-core</artifactId>
+			<version>2.9.1</version>
+			<scope>test</scope>
+		</dependency>
+
+	</dependencies>
+
+	<build>
+		<plugins>
+			<!--
+				For the tests, we need to exclude the Log4j2-to-SLF4J adapter dependency
+				and let Elasticsearch use Log4j2 directly, otherwise the embedded Elasticsearch node
+				used in the tests will not work properly.
+
+				In other words, the connector jar routes Elasticsearch's Log4j2 APIs to SLF4J,
+				but for the test builds, we still stick to using Log4j2 directly.
+			-->
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<version>2.12.2</version>
+				<configuration>
+					<systemPropertyVariables>
+						<jna.nosys>true</jna.nosys>
+					</systemPropertyVariables>
+					<classpathDependencyExcludes>
+						<classpathDependencyExclude>org.apache.logging.log4j:log4j-to-slf4j</classpathDependencyExclude>
+					</classpathDependencyExcludes>
+				</configuration>
+			</plugin>
+		</plugins>
+	</build>
+
+</project>
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6ApiCallBridge.java b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6ApiCallBridge.java
new file mode 100644
index 00000000000..782cbbcf467
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6ApiCallBridge.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.elasticsearch6;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchApiCallBridge;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
+import org.apache.flink.util.Preconditions;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.action.bulk.BackoffPolicy;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkProcessor;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientBuilder;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.common.unit.TimeValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Implementation of {@link ElasticsearchApiCallBridge} for Elasticsearch 6 and later versions.
+ */
+@Internal
+public class Elasticsearch6ApiCallBridge implements ElasticsearchApiCallBridge<RestHighLevelClient> {
+
+	private static final long serialVersionUID = -5222683870097809633L;
+
+	private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch6ApiCallBridge.class);
+
+	/**
+	 * The user-provided HTTP hosts.
+	 */
+	private final List<HttpHost> httpHosts;
+
+	/**
+	 * The factory to configure the rest client.
+	 */
+	private final RestClientFactory restClientFactory;
+
+	Elasticsearch6ApiCallBridge(List<HttpHost> httpHosts, RestClientFactory restClientFactory) {
+		Preconditions.checkArgument(httpHosts != null && !httpHosts.isEmpty());
+		this.httpHosts = httpHosts;
+		this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
+	}
+
+	@Override
+	public RestHighLevelClient createClient(Map<String, String> clientConfig) throws IOException {
+		RestClientBuilder builder = RestClient.builder(httpHosts.toArray(new HttpHost[httpHosts.size()]));
+		restClientFactory.configureRestClientBuilder(builder);
+
+		RestHighLevelClient rhlClient = new RestHighLevelClient(builder);
+
+		if (LOG.isInfoEnabled()) {
+			LOG.info("Pinging Elasticsearch cluster via hosts {} ...", httpHosts);
+		}
+
+		if (!rhlClient.ping()) {
+			throw new RuntimeException("There are no reachable Elasticsearch nodes!");
+		}
+
+		if (LOG.isInfoEnabled()) {
+			LOG.info("Created Elasticsearch RestHighLevelClient connected to {}", httpHosts.toString());
+		}
+
+		return rhlClient;
+	}
+
+	@Override
+	public BulkProcessor.Builder createBulkProcessorBuilder(RestHighLevelClient client, BulkProcessor.Listener listener) {
+		return BulkProcessor.builder(client::bulkAsync, listener);
+	}
+
+	@Override
+	public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
+		if (!bulkItemResponse.isFailed()) {
+			return null;
+		} else {
+			return bulkItemResponse.getFailure().getCause();
+		}
+	}
+
+	@Override
+	public void configureBulkProcessorBackoff(
+		BulkProcessor.Builder builder,
+		@Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy flushBackoffPolicy) {
+
+		BackoffPolicy backoffPolicy;
+		if (flushBackoffPolicy != null) {
+			switch (flushBackoffPolicy.getBackoffType()) {
+				case CONSTANT:
+					backoffPolicy = BackoffPolicy.constantBackoff(
+						new TimeValue(flushBackoffPolicy.getDelayMillis()),
+						flushBackoffPolicy.getMaxRetryCount());
+					break;
+				case EXPONENTIAL:
+				default:
+					backoffPolicy = BackoffPolicy.exponentialBackoff(
+						new TimeValue(flushBackoffPolicy.getDelayMillis()),
+						flushBackoffPolicy.getMaxRetryCount());
+			}
+		} else {
+			backoffPolicy = BackoffPolicy.noBackoff();
+		}
+
+		builder.setBackoffPolicy(backoffPolicy);
+	}
+
+	@Override
+	public RequestIndexer createBulkProcessorIndexer(
+			BulkProcessor bulkProcessor,
+			boolean flushOnCheckpoint,
+			AtomicLong numPendingRequestsRef) {
+		return new Elasticsearch6BulkProcessorIndexer(
+			bulkProcessor,
+			flushOnCheckpoint,
+			numPendingRequestsRef);
+	}
+}
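
For readers unfamiliar with the 6.x client, the bridge above is the piece that swaps the TransportClient-based BulkProcessor construction for one driven by RestHighLevelClient::bulkAsync. Below is a minimal standalone sketch of that wiring outside Flink; it is not part of this PR's diff, and the host address and the no-op listener are illustrative assumptions.

import org.apache.http.HttpHost;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class BulkProcessorWiringSketch {

	public static BulkProcessor create() {
		// high-level REST client talking to an assumed local node
		RestHighLevelClient client = new RestHighLevelClient(
			RestClient.builder(new HttpHost("127.0.0.1", 9200, "http")));

		// no-op listener; the Flink sink base installs its own listener for failure handling
		BulkProcessor.Listener listener = new BulkProcessor.Listener() {
			@Override
			public void beforeBulk(long executionId, BulkRequest request) { }

			@Override
			public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }

			@Override
			public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
		};

		// BulkProcessor.builder accepts a BiConsumer<BulkRequest, ActionListener<BulkResponse>>;
		// the client's bulkAsync method reference matches that shape, which is what
		// createBulkProcessorBuilder above relies on.
		return BulkProcessor.builder(client::bulkAsync, listener).build();
	}
}

Exposing createBulkProcessorBuilder on the bridge is what lets the shared ElasticsearchSinkBase stay agnostic of whether the underlying client is a TransportClient or a RestHighLevelClient.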
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6BulkProcessorIndexer.java
similarity index 58%
rename from flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
rename to flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6BulkProcessorIndexer.java
index 2ebb97c82e2..af3c5b13a9a 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
+++ b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/Elasticsearch6BulkProcessorIndexer.java
@@ -16,12 +16,16 @@
  * limitations under the License.
  */
 
-package org.apache.flink.streaming.connectors.elasticsearch;
+package org.apache.flink.streaming.connectors.elasticsearch6;
 
 import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
 
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.bulk.BulkProcessor;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
 
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -30,27 +34,52 @@
 /**
  * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}.
  * {@link ActionRequest ActionRequests} will be buffered before sending a bulk request to the Elasticsearch cluster.
+ *
+ * <p>Note: This class is binary compatible with Elasticsearch 6.
  */
 @Internal
-class BulkProcessorIndexer implements RequestIndexer {
+class Elasticsearch6BulkProcessorIndexer implements RequestIndexer {
 
 	private final BulkProcessor bulkProcessor;
 	private final boolean flushOnCheckpoint;
 	private final AtomicLong numPendingRequestsRef;
 
-	BulkProcessorIndexer(BulkProcessor bulkProcessor, boolean flushOnCheckpoint, AtomicLong numPendingRequestsRef) {
+	Elasticsearch6BulkProcessorIndexer(
+			BulkProcessor bulkProcessor,
+			boolean flushOnCheckpoint,
+			AtomicLong numPendingRequestsRef) {
 		this.bulkProcessor = checkNotNull(bulkProcessor);
 		this.flushOnCheckpoint = flushOnCheckpoint;
 		this.numPendingRequestsRef = checkNotNull(numPendingRequestsRef);
 	}
 
 	@Override
-	public void add(ActionRequest... actionRequests) {
-		for (ActionRequest actionRequest : actionRequests) {
+	public void add(DeleteRequest... deleteRequests) {
+		for (DeleteRequest deleteRequest : deleteRequests) {
+			if (flushOnCheckpoint) {
+				numPendingRequestsRef.getAndIncrement();
+			}
+			this.bulkProcessor.add(deleteRequest);
+		}
+	}
+
+	@Override
+	public void add(IndexRequest... indexRequests) {
+		for (IndexRequest indexRequest : indexRequests) {
+			if (flushOnCheckpoint) {
+				numPendingRequestsRef.getAndIncrement();
+			}
+			this.bulkProcessor.add(indexRequest);
+		}
+	}
+
+	@Override
+	public void add(UpdateRequest... updateRequests) {
+		for (UpdateRequest updateRequest : updateRequests) {
 			if (flushOnCheckpoint) {
 				numPendingRequestsRef.getAndIncrement();
 			}
-			this.bulkProcessor.add(actionRequest);
+			this.bulkProcessor.add(updateRequest);
 		}
 	}
 }
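
The renamed indexer above also replaces the single add(ActionRequest...) entry point with typed overloads for delete, index and update requests. As a rough illustration only — the index, type and prefix convention below are invented for the sketch and are not part of this PR — a sink function exercising those overloads could look like this:

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;

import java.util.HashMap;
import java.util.Map;

public class TypedRequestsSinkFunction implements ElasticsearchSinkFunction<String> {

	@Override
	public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
		Map<String, Object> json = new HashMap<>();
		json.put("data", element);

		if (element.startsWith("delete:")) {
			// drop the document whose id follows the prefix
			indexer.add(new DeleteRequest("my-index", "my-type", element.substring(7)));
		} else if (element.startsWith("update:")) {
			// partial update keyed by the id that follows the prefix
			indexer.add(new UpdateRequest("my-index", "my-type", element.substring(7)).doc(json));
		} else {
			// plain insert with an auto-generated id
			indexer.add(new IndexRequest("my-index", "my-type").source(json));
		}
	}
}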
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java
new file mode 100644
index 00000000000..4e7a2635738
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSink.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.elasticsearch6;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
+import org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler;
+import org.apache.flink.util.Preconditions;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.bulk.BulkProcessor;
+import org.elasticsearch.client.RestHighLevelClient;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Elasticsearch 6.x sink that requests multiple {@link ActionRequest ActionRequests}
+ * against a cluster for each incoming element.
+ *
+ * <p>The sink internally uses a {@link RestHighLevelClient} to communicate with an Elasticsearch cluster.
+ * The sink will fail if no cluster can be connected to through the {@link HttpHost HttpHosts} passed to the builder.
+ *
+ * <p>Internally, the sink will use a {@link BulkProcessor} to send {@link ActionRequest ActionRequests}.
+ * This will buffer elements before sending a request to the cluster. The behaviour of the
+ * {@code BulkProcessor} can be configured using these config keys:
+ * <ul>
+ *   <li> {@code bulk.flush.max.actions}: Maximum amount of elements to buffer
+ *   <li> {@code bulk.flush.max.size.mb}: Maximum amount of data (in megabytes) to buffer
+ *   <li> {@code bulk.flush.interval.ms}: Interval at which to flush data regardless of the other two
+ *   settings in milliseconds
+ * </ul>
+ *
+ * <p>You also have to provide an {@link ElasticsearchSinkFunction}. This is used to create multiple
+ * {@link ActionRequest ActionRequests} for each incoming element. See the class level documentation of
+ * {@link ElasticsearchSinkFunction} for an example.
+ *
+ * @param <T> Type of the elements handled by this sink
+ */
+@PublicEvolving
+public class ElasticsearchSink<T> extends ElasticsearchSinkBase<T, RestHighLevelClient> {
+
+	private static final long serialVersionUID = 1L;
+
+	private ElasticsearchSink(
+		Map<String, String> bulkRequestsConfig,
+		List<HttpHost> httpHosts,
+		ElasticsearchSinkFunction<T> elasticsearchSinkFunction,
+		ActionRequestFailureHandler failureHandler,
+		RestClientFactory restClientFactory) {
+
+		super(new Elasticsearch6ApiCallBridge(httpHosts, restClientFactory), bulkRequestsConfig, elasticsearchSinkFunction, failureHandler);
+	}
+
+	/**
+	 * A builder for creating an {@link ElasticsearchSink}.
+	 *
+	 * @param <T> Type of the elements handled by the sink this builder creates.
+	 */
+	@PublicEvolving
+	public static class Builder<T> {
+
+		private final List<HttpHost> httpHosts;
+		private final ElasticsearchSinkFunction<T> elasticsearchSinkFunction;
+
+		private Map<String, String> bulkRequestsConfig = new HashMap<>();
+		private ActionRequestFailureHandler failureHandler = new NoOpFailureHandler();
+		private RestClientFactory restClientFactory = restClientBuilder -> {};
+
+		/**
+		 * Creates a new {@code ElasticsearchSink} that connects to the cluster using a {@link RestHighLevelClient}.
+		 *
+		 * @param httpHosts The list of {@link HttpHost} to which the {@link RestHighLevelClient} connects.
+		 * @param elasticsearchSinkFunction This is used to generate multiple {@link ActionRequest} from the incoming element.
+		 */
+		public Builder(List<HttpHost> httpHosts, ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {
+			this.httpHosts = Preconditions.checkNotNull(httpHosts);
+			this.elasticsearchSinkFunction = Preconditions.checkNotNull(elasticsearchSinkFunction);
+		}
+
+		/**
+		 * Sets the maximum number of actions to buffer for each bulk request.
+		 *
+		 * @param numMaxActions the maximum number of actions to buffer per bulk request.
+		 */
+		public void setBulkFlushMaxActions(int numMaxActions) {
+			Preconditions.checkArgument(
+				numMaxActions > 0,
+				"Max number of buffered actions must be larger than 0.");
+
+			this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, String.valueOf(numMaxActions));
+		}
+
+		/**
+		 * Sets the maximum size, in megabytes, of buffered actions per bulk request.
+		 *
+		 * @param maxSizeMb the maximum size of buffered actions, in megabytes.
+		 */
+		public void setBulkFlushMaxSizeMb(int maxSizeMb) {
+			Preconditions.checkArgument(
+				maxSizeMb > 0,
+				"Max size of buffered actions must be larger than 0.");
+
+			this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB, String.valueOf(maxSizeMb));
+		}
+
+		/**
+		 * Sets the bulk flush interval, in milliseconds.
+		 *
+		 * @param intervalMillis the bulk flush interval, in milliseconds.
+		 */
+		public void setBulkFlushInterval(long intervalMillis) {
+			Preconditions.checkArgument(
+				intervalMillis >= 0,
+				"Interval (in milliseconds) between each flush must be larger than or equal to 0.");
+
+			this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_INTERVAL_MS, String.valueOf(intervalMillis));
+		}
+
+		/**
+		 * Sets whether or not to enable bulk flush backoff behaviour.
+		 *
+		 * @param enabled whether or not to enable backoffs.
+		 */
+		public void setBulkFlushBackoff(boolean enabled) {
+			this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, String.valueOf(enabled));
+		}
+
+		/**
+		 * Sets the type of backoff to use when flushing bulk requests.
+		 *
+		 * @param flushBackoffType the backoff type to use.
+		 */
+		public void setBulkFlushBackoffType(FlushBackoffType flushBackoffType) {
+			this.bulkRequestsConfig.put(
+				CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE,
+				Preconditions.checkNotNull(flushBackoffType).toString());
+		}
+
+		/**
+		 * Sets the maximum number of retries for a backoff attempt when flushing bulk requests.
+		 *
+		 * @param maxRetries the maximum number of retries for a backoff attempt when flushing bulk requests
+		 */
+		public void setBulkFlushBackoffRetries(int maxRetries) {
+			Preconditions.checkArgument(
+				maxRetries > 0,
+				"Max number of backoff attempts must be larger than 0.");
+
+			this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, String.valueOf(maxRetries));
+		}
+
+		/**
+		 * Sets the amount of delay between each backoff attempt when flushing bulk requests, in milliseconds.
+		 *
+		 * @param delayMillis the amount of delay between each backoff attempt when flushing bulk requests, in milliseconds.
+		 */
+		public void setBulkFlushBackoffDelay(long delayMillis) {
+			Preconditions.checkArgument(
+				delayMillis >= 0,
+				"Delay (in milliseconds) between each backoff attempt must be larger than or equal to 0.");
+			this.bulkRequestsConfig.put(CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY, String.valueOf(delayMillis));
+		}
+
+		/**
+		 * Sets a failure handler for action requests.
+		 *
+		 * @param failureHandler This is used to handle failed {@link ActionRequest}.
+		 */
+		public void setFailureHandler(ActionRequestFailureHandler failureHandler) {
+			this.failureHandler = Preconditions.checkNotNull(failureHandler);
+		}
+
+		/**
+		 * Sets a REST client factory for custom client configuration.
+		 *
+		 * @param restClientFactory the factory that configures the rest client.
+		 */
+		public void setRestClientFactory(RestClientFactory restClientFactory) {
+			this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
+		}
+
+		/**
+		 * Creates the Elasticsearch sink.
+		 *
+		 * @return the created Elasticsearch sink.
+		 */
+		public ElasticsearchSink<T> build() {
+			return new ElasticsearchSink<>(bulkRequestsConfig, httpHosts, elasticsearchSinkFunction, failureHandler, restClientFactory);
+		}
+	}
+}
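
For reference, a minimal usage sketch of the builder above (not part of this patch; the host, index, and type names are placeholders, and the sink function mirrors the IT case further down in this diff):

    import org.apache.flink.api.common.functions.RuntimeContext;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
    import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Requests;

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class ElasticsearchSinkUsageSketch {

        public static void attachSink(DataStream<String> stream) {
            List<HttpHost> httpHosts = new ArrayList<>();
            httpHosts.add(new HttpHost("127.0.0.1", 9200, "http"));

            // index every incoming element as a one-field JSON document
            ElasticsearchSink.Builder<String> builder = new ElasticsearchSink.Builder<String>(
                httpHosts,
                (String element, RuntimeContext ctx, RequestIndexer indexer) ->
                    indexer.add(Requests.indexRequest()
                        .index("my-index")
                        .type("my-type")
                        .source(Collections.singletonMap("data", element))));

            // flush after every element, as the IT case below does
            builder.setBulkFlushMaxActions(1);

            stream.addSink(builder.build());
        }
    }
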
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/RestClientFactory.java b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/RestClientFactory.java
new file mode 100644
index 00000000000..4b74649ca87
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch6/src/main/java/org/apache/flink/streaming/connectors/elasticsearch6/RestClientFactory.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.elasticsearch6;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+import org.elasticsearch.client.RestClientBuilder;
+
+import java.io.Serializable;
+
+/**
+ * A factory that configures the {@link org.elasticsearch.client.RestHighLevelClient} used internally
+ * by the {@link ElasticsearchSink}.
+ */
+@PublicEvolving
+public interface RestClientFactory extends Serializable {
+
+	/**
+	 * Configures the rest client builder.
+	 *
+	 * @param restClientBuilder the rest client builder to configure.
+	 */
+	void configureRestClientBuilder(RestClientBuilder restClientBuilder);
+
+}
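
For reference, a short sketch (not part of this patch) of a custom RestClientFactory; the header name/value pair is a placeholder, and setDefaultHeaders is assumed to be the stock method on the Elasticsearch 6 low-level RestClientBuilder:

    import org.apache.flink.streaming.connectors.elasticsearch6.RestClientFactory;

    import org.apache.http.Header;
    import org.apache.http.message.BasicHeader;
    import org.elasticsearch.client.RestClientBuilder;

    public class HeaderSettingRestClientFactory implements RestClientFactory {

        @Override
        public void configureRestClientBuilder(RestClientBuilder restClientBuilder) {
            // attach a default header to every request issued by the client
            restClientBuilder.setDefaultHeaders(
                new Header[] {new BasicHeader("Content-Type", "application/json")});
        }
    }

It would be wired in through the builder from the previous file, e.g. esSinkBuilder.setRestClientFactory(new HeaderSettingRestClientFactory()).
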
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
new file mode 100644
index 00000000000..8dc62168049
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/EmbeddedElasticsearchNodeEnvironmentImpl.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.elasticsearch;
+
+import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSinkITCase;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.InternalSettingsPreparer;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.transport.Netty4Plugin;
+
+import java.io.File;
+import java.util.Collections;
+
+/**
+ * Implementation of {@link EmbeddedElasticsearchNodeEnvironment} for Elasticsearch 6.
+ * It is loaded dynamically in {@link ElasticsearchSinkITCase} for the integration tests.
+ */
+public class EmbeddedElasticsearchNodeEnvironmentImpl implements EmbeddedElasticsearchNodeEnvironment {
+
+	private Node node;
+
+	@Override
+	public void start(File tmpDataFolder, String clusterName) throws Exception {
+		if (node == null) {
+			Settings settings = Settings.builder()
+				.put("cluster.name", clusterName)
+				.put("http.enabled", true)
+				.put("path.home", tmpDataFolder.getParent())
+				.put("path.data", tmpDataFolder.getAbsolutePath())
+				.build();
+
+			node = new PluginNode(settings);
+			node.start();
+		}
+	}
+
+	@Override
+	public void close() throws Exception {
+		if (node != null && !node.isClosed()) {
+			node.close();
+			node = null;
+		}
+	}
+
+	@Override
+	public Client getClient() {
+		if (node != null && !node.isClosed()) {
+			return node.client();
+		} else {
+			return null;
+		}
+	}
+
+	private static class PluginNode extends Node {
+		public PluginNode(Settings settings) {
+			super(InternalSettingsPreparer.prepareEnvironment(settings, null), Collections.<Class<? extends Plugin>>singletonList(Netty4Plugin.class));
+		}
+	}
+
+}
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSinkITCase.java b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSinkITCase.java
new file mode 100644
index 00000000000..a6f01258940
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/streaming/connectors/elasticsearch6/ElasticsearchSinkITCase.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.elasticsearch6;
+
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
+import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkTestBase;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * IT cases for the {@link ElasticsearchSink}.
+ *
+ * <p>The Elasticsearch ITCases for 6.x CANNOT be executed in the IDE directly, since the
+ * Log4J-to-SLF4J adapter dependency must be excluded from the test classpath for the embedded
+ * Elasticsearch node used in the tests to work properly.
+ */
+public class ElasticsearchSinkITCase extends ElasticsearchSinkTestBase<RestHighLevelClient, HttpHost> {
+
+	@Test
+	public void testElasticsearchSink() throws Exception {
+		runElasticsearchSinkTest();
+	}
+
+	@Test
+	public void testNullAddresses() throws Exception {
+		runNullAddressesTest();
+	}
+
+	@Test
+	public void testEmptyAddresses() throws Exception {
+		runEmptyAddressesTest();
+	}
+
+	@Test
+	public void testInvalidElasticsearchCluster() throws Exception {
+		runInvalidElasticsearchClusterTest();
+	}
+
+	@Override
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient> createElasticsearchSink(
+			int bulkFlushMaxActions,
+			String clusterName,
+			List<HttpHost> httpHosts,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) {
+
+		ElasticsearchSink.Builder<Tuple2<Integer, String>> builder = new ElasticsearchSink.Builder<>(httpHosts, elasticsearchSinkFunction);
+		builder.setBulkFlushMaxActions(bulkFlushMaxActions);
+
+		return builder.build();
+	}
+
+	@Override
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient> createElasticsearchSinkForEmbeddedNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction) throws Exception {
+
+		return createElasticsearchSinkForNode(
+				bulkFlushMaxActions, clusterName, elasticsearchSinkFunction, "127.0.0.1");
+	}
+
+	@Override
+	protected ElasticsearchSinkBase<Tuple2<Integer, String>, RestHighLevelClient> createElasticsearchSinkForNode(
+			int bulkFlushMaxActions,
+			String clusterName,
+			ElasticsearchSinkFunction<Tuple2<Integer, String>> elasticsearchSinkFunction,
+			String ipAddress) throws Exception {
+
+		ArrayList<HttpHost> httpHosts = new ArrayList<>();
+		httpHosts.add(new HttpHost(ipAddress, 9200, "http"));
+
+		ElasticsearchSink.Builder<Tuple2<Integer, String>> builder = new ElasticsearchSink.Builder<>(httpHosts, elasticsearchSinkFunction);
+		builder.setBulkFlushMaxActions(bulkFlushMaxActions);
+
+		return builder.build();
+	}
+}
diff --git a/flink-connectors/flink-connector-elasticsearch6/src/test/resources/log4j-test.properties b/flink-connectors/flink-connector-elasticsearch6/src/test/resources/log4j-test.properties
new file mode 100644
index 00000000000..fcd86546668
--- /dev/null
+++ b/flink-connectors/flink-connector-elasticsearch6/src/test/resources/log4j-test.properties
@@ -0,0 +1,24 @@
+################################################################################
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+log4j.rootLogger=INFO, testlogger
+
+log4j.appender.testlogger=org.apache.log4j.ConsoleAppender
+log4j.appender.testlogger.target=System.err
+log4j.appender.testlogger.layout=org.apache.log4j.PatternLayout
+log4j.appender.testlogger.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
diff --git a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java
index b7035fe8ab4..edabb6b56eb 100644
--- a/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java
+++ b/flink-connectors/flink-connector-filesystem/src/main/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketer.java
@@ -19,13 +19,15 @@
 package org.apache.flink.streaming.connectors.fs.bucketing;
 
 import org.apache.flink.streaming.connectors.fs.Clock;
+import org.apache.flink.util.Preconditions;
 
 import org.apache.hadoop.fs.Path;
 
 import java.io.IOException;
 import java.io.ObjectInputStream;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
 
 /**
  * A {@link Bucketer} that assigns to buckets based on current system time.
@@ -38,8 +40,8 @@
  * is determined based on the current system time and the user provided format string.
  *
  *
- * <p>{@link SimpleDateFormat} is used to derive a date string from the current system time and
- * the date format string. The default format string is {@code "yyyy-MM-dd--HH"} so the rolling
+ * <p>{@link DateTimeFormatter} is used to derive a date string from the current system time and
+ * the date format string with a timezone. The default format string is {@code "yyyy-MM-dd--HH"} so the rolling
  * files will have a granularity of hours.
  *
  *
@@ -62,43 +64,67 @@
 
 	private final String formatString;
 
-	private transient SimpleDateFormat dateFormatter;
+	private final ZoneId zoneId;
+
+	private transient DateTimeFormatter dateTimeFormatter;
 
 	/**
-	 * Creates a new {@code DateTimeBucketer} with format string {@code "yyyy-MM-dd--HH"}.
+	 * Creates a new {@code DateTimeBucketer} with format string {@code "yyyy-MM-dd--HH"} using the JVM's default timezone.
 	 */
 	public DateTimeBucketer() {
 		this(DEFAULT_FORMAT_STRING);
 	}
 
 	/**
-	 * Creates a new {@code DateTimeBucketer} with the given date/time format string.
+	 * Creates a new {@code DateTimeBucketer} with the given date/time format string using the JVM's default timezone.
 	 *
-	 * @param formatString The format string that will be given to {@code SimpleDateFormat} to determine
+	 * @param formatString The format string that will be given to {@code DateTimeFormatter} to determine
 	 *                     the bucket path.
 	 */
 	public DateTimeBucketer(String formatString) {
-		this.formatString = formatString;
+		this(formatString, ZoneId.systemDefault());
+	}
+
+	/**
+	 * Creates a new {@code DateTimeBucketer} with format string {@code "yyyy-MM-dd--HH"} using the given timezone.
+	 *
+	 * @param zoneId The timezone used by the {@code DateTimeFormatter} when formatting the bucket path.
+	 */
+	public DateTimeBucketer(ZoneId zoneId) {
+		this(DEFAULT_FORMAT_STRING, zoneId);
+	}
+
+	/**
+	 * Creates a new {@code DateTimeBucketer} with the given date/time format string using the given timezone.
+	 *
+	 * @param formatString The format string that will be given to {@code DateTimeFormatter} to determine
+	 *                     the bucket path.
+	 * @param zoneId The timezone used by the {@code DateTimeFormatter} when formatting the bucket path.
+	 */
+	public DateTimeBucketer(String formatString, ZoneId zoneId) {
+		this.formatString = Preconditions.checkNotNull(formatString);
+		this.zoneId = Preconditions.checkNotNull(zoneId);
 
-		this.dateFormatter = new SimpleDateFormat(formatString);
+		this.dateTimeFormatter = DateTimeFormatter.ofPattern(this.formatString).withZone(zoneId);
 	}
 
 	private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
 		in.defaultReadObject();
 
-		this.dateFormatter = new SimpleDateFormat(formatString);
+		this.dateTimeFormatter = DateTimeFormatter.ofPattern(formatString).withZone(zoneId);
 	}
 
 	@Override
 	public Path getBucketPath(Clock clock, Path basePath, T element) {
-		String newDateTimeString = dateFormatter.format(new Date(clock.currentTimeMillis()));
+		String newDateTimeString = dateTimeFormatter.format(Instant.ofEpochMilli(clock.currentTimeMillis()));
 		return new Path(basePath + "/" + newDateTimeString);
 	}
 
 	@Override
 	public String toString() {
 		return "DateTimeBucketer{" +
-				"formatString='" + formatString + '\'' +
-				'}';
+			"formatString='" + formatString + '\'' +
+			", zoneId=" + zoneId +
+			'}';
 	}
 }
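
For reference, a minimal sketch (not part of this patch) of the new timezone-aware constructor in use; the base path and zone are placeholders, and the point is that bucket names no longer depend on the JVM's default timezone:

    import java.time.ZoneId;

    import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
    import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;

    public class UtcBucketingSketch {

        public static BucketingSink<String> utcBucketedSink() {
            BucketingSink<String> sink = new BucketingSink<>("hdfs:///flink/buckets");
            // hourly buckets, formatted in UTC regardless of where the job runs
            sink.setBucketer(new DateTimeBucketer<String>("yyyy-MM-dd--HH", ZoneId.of("UTC")));
            return sink;
        }
    }
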
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkMigrationTest.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkMigrationTest.java
index 307f10fb558..38cc9149ee2 100644
--- a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkMigrationTest.java
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkMigrationTest.java
@@ -80,12 +80,16 @@ public static void verifyOS() {
 		Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());
 	}
 
+	/**
+	 * The bucket file prefix is the absolute path to the part files; it is stored within the savepoint.
+	 */
 	@Parameterized.Parameters(name = "Migration Savepoint / Bucket Files Prefix: {0}")
 	public static Collection<Tuple2<MigrationVersion, String>> parameters () {
 		return Arrays.asList(
 			Tuple2.of(MigrationVersion.v1_2, "/var/folders/v_/ry2wp5fx0y7c1rvr41xy9_700000gn/T/junit9160378385359106772/junit479663758539998903/1970-01-01--01/part-0-"),
 			Tuple2.of(MigrationVersion.v1_3, "/var/folders/tv/b_1d8fvx23dgk1_xs8db_95h0000gn/T/junit4273542175898623023/junit3801102997056424640/1970-01-01--01/part-0-"),
-			Tuple2.of(MigrationVersion.v1_4, "/var/folders/tv/b_1d8fvx23dgk1_xs8db_95h0000gn/T/junit3198043255809479705/junit8947526563966405708/1970-01-01--01/part-0-"));
+			Tuple2.of(MigrationVersion.v1_4, "/var/folders/tv/b_1d8fvx23dgk1_xs8db_95h0000gn/T/junit3198043255809479705/junit8947526563966405708/1970-01-01--01/part-0-"),
+			Tuple2.of(MigrationVersion.v1_5, "/tmp/junit4927100426019463155/junit2465610012100182280/1970-01-01--00/part-0-"));
 	}
 
 	private final MigrationVersion testMigrateVersion;
diff --git a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketerTest.java b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketerTest.java
new file mode 100644
index 00000000000..3ce9f496c7f
--- /dev/null
+++ b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/DateTimeBucketerTest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.fs.bucketing;
+
+import org.apache.flink.streaming.connectors.fs.Clock;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+import java.time.ZoneId;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests for {@link DateTimeBucketer}.
+ */
+public class DateTimeBucketerTest {
+	private static final long TEST_TIME_IN_MILLIS = 1533363082011L;
+	private static final Path TEST_PATH = new Path("test");
+
+	private static final Clock mockedClock = new MockedClock();
+
+	@Test
+	public void testGetBucketPathWithSpecifiedTimezone() {
+		DateTimeBucketer bucketer = new DateTimeBucketer(ZoneId.of("America/Los_Angeles"));
+
+		assertEquals(new Path("test/2018-08-03--23"), bucketer.getBucketPath(mockedClock, TEST_PATH, null));
+	}
+
+	@Test
+	public void testGetBucketPathWithSpecifiedFormatString() {
+		DateTimeBucketer bucketer = new DateTimeBucketer("yyyy-MM-dd-HH", ZoneId.of("America/Los_Angeles"));
+
+		assertEquals(new Path("test/2018-08-03-23"), bucketer.getBucketPath(mockedClock, TEST_PATH, null));
+	}
+
+	private static class MockedClock implements Clock {
+
+		@Override
+		public long currentTimeMillis() {
+			return TEST_TIME_IN_MILLIS;
+		}
+	}
+}
diff --git a/flink-connectors/flink-connector-filesystem/src/test/resources/bucketing-sink-migration-test-flink1.5-snapshot b/flink-connectors/flink-connector-filesystem/src/test/resources/bucketing-sink-migration-test-flink1.5-snapshot
new file mode 100644
index 00000000000..eab6510f4fe
Binary files /dev/null and b/flink-connectors/flink-connector-filesystem/src/test/resources/bucketing-sink-migration-test-flink1.5-snapshot differ
diff --git a/flink-connectors/flink-connector-kafka-0.10/pom.xml b/flink-connectors/flink-connector-kafka-0.10/pom.xml
index 2fb7a323976..9b2353dc8ff 100644
--- a/flink-connectors/flink-connector-kafka-0.10/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.10/pom.xml
@@ -203,12 +203,12 @@ under the License.
 	</dependencies>
 
 	<profiles>
+		<!-- Create SQL Client uber jars by default -->
 		<profile>
-			<!-- Create SQL Client uber jars for releases -->
-			<id>release</id>
+			<id>sql-jars</id>
 			<activation>
 				<property>
-					<name>release</name>
+					<name>!skipSqlJars</name>
 				</property>
 			</activation>
 			<build>
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
index ebbadcff1e1..d9d0a91ced1 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010AvroTableSource.java
@@ -39,7 +39,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka010AvroTableSource extends KafkaAvroTableSource {
@@ -120,7 +120,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Builder builder() {
@@ -133,7 +133,7 @@ public static Builder builder() {
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaAvroTableSource.Builder<Kafka010AvroTableSource, Kafka010AvroTableSource.Builder> {
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
index ef33cd56237..8471908a9cf 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
@@ -18,18 +18,24 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.table.descriptors.ConnectorDescriptor;
 import org.apache.flink.types.Row;
 
+import java.util.Optional;
 import java.util.Properties;
 
 /**
  * Kafka 0.10 {@link KafkaTableSink} that serializes data in JSON format.
+ *
+ * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
+ *             with descriptors for schema and format instead. Descriptors allow for
+ *             implementation-agnostic definition of tables. See also
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
-@PublicEvolving
+@Deprecated
 public class Kafka010JsonTableSink extends KafkaJsonTableSink {
 
 	/**
@@ -46,7 +52,9 @@
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public Kafka010JsonTableSink(String topic, Properties properties) {
 		super(topic, properties, new FlinkFixedPartitioner<>());
 	}
@@ -58,18 +66,31 @@ public Kafka010JsonTableSink(String topic, Properties properties) {
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
 	 * @param partitioner Kafka partitioner
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public Kafka010JsonTableSink(String topic, Properties properties, FlinkKafkaPartitioner<Row> partitioner) {
 		super(topic, properties, partitioner);
 	}
 
 	@Override
-	protected FlinkKafkaProducerBase<Row> createKafkaProducer(String topic, Properties properties, SerializationSchema<Row> serializationSchema, FlinkKafkaPartitioner<Row> partitioner) {
-		return new FlinkKafkaProducer010<>(topic, serializationSchema, properties, partitioner);
+	protected FlinkKafkaProducerBase<Row> createKafkaProducer(
+			String topic,
+			Properties properties,
+			SerializationSchema<Row> serializationSchema,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner) {
+		return new FlinkKafkaProducer010<>(
+			topic,
+			serializationSchema,
+			properties,
+			partitioner.orElse(new FlinkFixedPartitioner<>()));
 	}
 
 	@Override
 	protected Kafka010JsonTableSink createCopy() {
-		return new Kafka010JsonTableSink(topic, properties, partitioner);
+		return new Kafka010JsonTableSink(
+			topic,
+			properties,
+			partitioner.orElse(new FlinkFixedPartitioner<>()));
 	}
 }
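
Since the deprecation notes in this patch point to TableEnvironment#connect(ConnectorDescriptor), a hedged sketch of that replacement path follows (assuming the 1.6-era Kafka, Json, and Schema descriptors; the topic, server address, field names, and registered sink name are placeholders):

    import org.apache.flink.table.api.java.StreamTableEnvironment;
    import org.apache.flink.table.descriptors.Json;
    import org.apache.flink.table.descriptors.Kafka;
    import org.apache.flink.table.descriptors.Schema;

    public class DescriptorSinkSketch {

        public static void registerKafkaJsonSink(StreamTableEnvironment tableEnv) {
            tableEnv
                .connect(new Kafka()
                    .version("0.10")
                    .topic("my-topic")
                    .property("bootstrap.servers", "localhost:9092"))
                .withFormat(new Json().deriveSchema())
                .withSchema(new Schema()
                    .field("user", "VARCHAR")
                    .field("cnt", "BIGINT"))
                .inAppendMode()
                .registerTableSink("kafkaJsonSink");
        }
    }
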
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
index a5e33a12715..38d9034a9a7 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSource.java
@@ -36,7 +36,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka010JsonTableSource extends KafkaJsonTableSource {
@@ -121,7 +121,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Kafka010JsonTableSource.Builder builder() {
@@ -134,7 +134,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaJsonTableSource.Builder<Kafka010JsonTableSource, Kafka010JsonTableSource.Builder> {
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
new file mode 100644
index 00000000000..1d408b8ab52
--- /dev/null
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.types.Row;
+
+import java.util.Optional;
+import java.util.Properties;
+
+/**
+ * Kafka 0.10 table sink for writing data into Kafka.
+ */
+@Internal
+public class Kafka010TableSink extends KafkaTableSink {
+
+	public Kafka010TableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+		super(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
+
+	@Override
+	protected FlinkKafkaProducerBase<Row> createKafkaProducer(
+			String topic,
+			Properties properties,
+			SerializationSchema<Row> serializationSchema,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner) {
+		return new FlinkKafkaProducer010<>(
+			topic,
+			serializationSchema,
+			properties,
+			partitioner.orElse(null));
+	}
+}
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceFactory.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceSinkFactory.java
similarity index 79%
rename from flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceFactory.java
rename to flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceSinkFactory.java
index 4a860162222..ecf12b27a08 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceFactory.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceSinkFactory.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -34,7 +36,7 @@
 /**
  * Factory for creating configured instances of {@link Kafka010TableSource}.
  */
-public class Kafka010TableSourceFactory extends KafkaTableSourceFactory {
+public class Kafka010TableSourceSinkFactory extends KafkaTableSourceSinkFactoryBase {
 
 	@Override
 	protected String kafkaVersion() {
@@ -69,4 +71,20 @@ protected KafkaTableSource createKafkaTableSource(
 			startupMode,
 			specificStartupOffsets);
 	}
+
+	@Override
+	protected KafkaTableSink createKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka010TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory b/flink-connectors/flink-connector-kafka-0.10/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
index 21f57077d63..9bb036302f8 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
+++ b/flink-connectors/flink-connector-kafka-0.10/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.streaming.connectors.kafka.Kafka010TableSourceFactory
+org.apache.flink.streaming.connectors.kafka.Kafka010TableSourceSinkFactory
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSinkTest.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSinkTest.java
index af562c6fa9e..9208f6583b8 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSinkTest.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSinkTest.java
@@ -27,7 +27,11 @@
 
 /**
  * Tests for the {@link Kafka010JsonTableSink}.
+ *
+ * @deprecated Ensures backwards compatibility with Flink 1.5. Can be removed once we
+ *             drop support for format-specific table sinks.
  */
+@Deprecated
 public class Kafka010JsonTableSinkTest extends KafkaTableSinkTestBase {
 
 	@Override
@@ -36,7 +40,10 @@ protected KafkaTableSink createTableSink(
 			Properties properties,
 			FlinkKafkaPartitioner<Row> partitioner) {
 
-		return new Kafka010JsonTableSink(topic, properties, partitioner);
+		return new Kafka010JsonTableSink(
+			topic,
+			properties,
+			partitioner);
 	}
 
 	@Override
diff --git a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceFactoryTest.java b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceSinkFactoryTest.java
similarity index 73%
rename from flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceFactoryTest.java
rename to flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceSinkFactoryTest.java
index ff3b0b0001f..dac8a4dacdd 100644
--- a/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceFactoryTest.java
+++ b/flink-connectors/flink-connector-kafka-0.10/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSourceSinkFactoryTest.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,9 +34,10 @@
 import java.util.Properties;
 
 /**
- * Test for {@link Kafka010TableSource} created by {@link Kafka010TableSourceFactory}.
+ * Test for {@link Kafka010TableSource} and {@link Kafka010TableSink} created
+ * by {@link Kafka010TableSourceSinkFactory}.
  */
-public class Kafka010TableSourceFactoryTest extends KafkaTableSourceFactoryTestBase {
+public class Kafka010TableSourceSinkFactoryTest extends KafkaTableSourceSinkFactoryTestBase {
 
 	@Override
 	protected String getKafkaVersion() {
@@ -47,6 +50,11 @@ protected String getKafkaVersion() {
 		return (Class) FlinkKafkaConsumer010.class;
 	}
 
+	@Override
+	protected Class<?> getExpectedFlinkKafkaProducer() {
+		return FlinkKafkaProducer010.class;
+	}
+
 	@Override
 	protected KafkaTableSource getExpectedKafkaTableSource(
 			TableSchema schema,
@@ -71,4 +79,21 @@ protected KafkaTableSource getExpectedKafkaTableSource(
 			specificStartupOffsets
 		);
 	}
+
+	@Override
+	protected KafkaTableSink getExpectedKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka010TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema
+		);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.11/pom.xml b/flink-connectors/flink-connector-kafka-0.11/pom.xml
index aa600049580..4ff1d960629 100644
--- a/flink-connectors/flink-connector-kafka-0.11/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.11/pom.xml
@@ -212,12 +212,12 @@ under the License.
 	</dependencies>
 
 	<profiles>
+		<!-- Create SQL Client uber jars by default -->
 		<profile>
-			<!-- Create SQL Client uber jars for releases -->
-			<id>release</id>
+			<id>sql-jars</id>
 			<activation>
 				<property>
-					<name>release</name>
+					<name>!skipSqlJars</name>
 				</property>
 			</activation>
 			<build>
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java
index b3f4e0a6e75..fab592f1bda 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011AvroTableSource.java
@@ -39,7 +39,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka011AvroTableSource extends KafkaAvroTableSource {
@@ -119,7 +119,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Builder builder() {
@@ -132,7 +132,7 @@ public static Builder builder() {
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaAvroTableSource.Builder<Kafka011AvroTableSource, Kafka011AvroTableSource.Builder> {
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java
index 74c5007d80a..375eeadffb9 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011JsonTableSource.java
@@ -36,7 +36,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka011JsonTableSource extends KafkaJsonTableSource {
@@ -121,7 +121,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Kafka011JsonTableSource.Builder builder() {
@@ -134,7 +134,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaJsonTableSource.Builder<Kafka011JsonTableSource, Kafka011JsonTableSource.Builder> {
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSink.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSink.java
new file mode 100644
index 00000000000..8d81a5b59a1
--- /dev/null
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSink.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.streaming.api.functions.sink.SinkFunction;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.types.Row;
+
+import java.util.Optional;
+import java.util.Properties;
+
+/**
+ * Kafka 0.11 table sink for writing data into Kafka.
+ */
+@Internal
+public class Kafka011TableSink extends KafkaTableSink {
+
+	public Kafka011TableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+		super(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
+
+	@Override
+	protected SinkFunction<Row> createKafkaProducer(
+			String topic,
+			Properties properties,
+			SerializationSchema<Row> serializationSchema,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner) {
+		return new FlinkKafkaProducer011<>(
+			topic,
+			new KeyedSerializationSchemaWrapper<>(serializationSchema),
+			properties,
+			partitioner);
+	}
+}
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java
index 85f566925bb..a646317fe0a 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSource.java
@@ -58,7 +58,8 @@ public Kafka011TableSource(
 			Optional<String> proctimeAttribute,
 			List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
 			Optional<Map<String, String>> fieldMapping,
-			String topic, Properties properties,
+			String topic,
+			Properties properties,
 			DeserializationSchema<Row> deserializationSchema,
 			StartupMode startupMode,
 			Map<KafkaTopicPartition, Long> specificStartupOffsets) {
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceFactory.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceSinkFactory.java
similarity index 79%
rename from flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceFactory.java
rename to flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceSinkFactory.java
index b1e392921a3..e6f677fb568 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceFactory.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceSinkFactory.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -34,7 +36,7 @@
 /**
  * Factory for creating configured instances of {@link Kafka011TableSource}.
  */
-public class Kafka011TableSourceFactory extends KafkaTableSourceFactory {
+public class Kafka011TableSourceSinkFactory extends KafkaTableSourceSinkFactoryBase {
 
 	@Override
 	protected String kafkaVersion() {
@@ -69,4 +71,20 @@ protected KafkaTableSource createKafkaTableSource(
 			startupMode,
 			specificStartupOffsets);
 	}
+
+	@Override
+	protected KafkaTableSink createKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka011TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory b/flink-connectors/flink-connector-kafka-0.11/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
index c056097ef8c..b59b4a77ef5 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.streaming.connectors.kafka.Kafka011TableSourceFactory
+org.apache.flink.streaming.connectors.kafka.Kafka011TableSourceSinkFactory
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceFactoryTest.java b/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceSinkFactoryTest.java
similarity index 73%
rename from flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceFactoryTest.java
rename to flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceSinkFactoryTest.java
index abaa49081f0..f4614761d21 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceFactoryTest.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka011TableSourceSinkFactoryTest.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,9 +34,10 @@
 import java.util.Properties;
 
 /**
- * Test for {@link Kafka011TableSource} created by {@link Kafka011TableSourceFactory}.
+ * Test for {@link Kafka011TableSource} and {@link Kafka011TableSink} created
+ * by {@link Kafka011TableSourceSinkFactory}.
  */
-public class Kafka011TableSourceFactoryTest extends KafkaTableSourceFactoryTestBase {
+public class Kafka011TableSourceSinkFactoryTest extends KafkaTableSourceSinkFactoryTestBase {
 
 	@Override
 	protected String getKafkaVersion() {
@@ -47,6 +50,11 @@ protected String getKafkaVersion() {
 		return (Class) FlinkKafkaConsumer011.class;
 	}
 
+	@Override
+	protected Class<?> getExpectedFlinkKafkaProducer() {
+		return FlinkKafkaProducer011.class;
+	}
+
 	@Override
 	protected KafkaTableSource getExpectedKafkaTableSource(
 			TableSchema schema,
@@ -71,4 +79,21 @@ protected KafkaTableSource getExpectedKafkaTableSource(
 			specificStartupOffsets
 		);
 	}
+
+	@Override
+	protected KafkaTableSink getExpectedKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka011TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema
+		);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
index 820628796c5..61c96bf1e5a 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08AvroTableSource.java
@@ -39,7 +39,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka08AvroTableSource extends KafkaAvroTableSource {
@@ -120,7 +120,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Builder builder() {
@@ -133,7 +133,7 @@ public static Builder builder() {
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaAvroTableSource.Builder<Kafka08AvroTableSource, Kafka08AvroTableSource.Builder> {
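
[Editor's sketch] The deprecation notes above (and the identical ones repeated below for the other connector versions) point to the descriptor-based TableEnvironment#connect(ConnectorDescriptor) API. A minimal, hedged sketch of that style for the Flink 1.6/1.7-era API; topic name, broker address, field names and the class name DescriptorSketch are illustrative and not taken from this patch:

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Json;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Schema;

public class DescriptorSketch {
	public static void main(String[] args) {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

		// Implementation-agnostic table definition: connector, format and schema
		// are described separately and matched against a TableFactory at runtime.
		tableEnv.connect(
				new Kafka()
					.version("0.8")
					.topic("example-topic")
					.property("bootstrap.servers", "localhost:9092"))
			.withFormat(new Json()
				.failOnMissingField(false)
				.deriveSchema())
			.withSchema(new Schema()
				.field("user", Types.STRING)
				.field("ts", Types.SQL_TIMESTAMP))
			.inAppendMode()
			.registerTableSource("ExampleTable");
	}
}

Since the factories in this patch also implement StreamTableSinkFactory, the same kind of descriptor should be usable to register a table sink as well, not only a source.
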
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
index c60288d3f01..189a9fdf46b 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSink.java
@@ -18,20 +18,26 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.KafkaPartitioner;
+import org.apache.flink.table.descriptors.ConnectorDescriptor;
 import org.apache.flink.types.Row;
 
+import java.util.Optional;
 import java.util.Properties;
 
 /**
  * Kafka 0.8 {@link KafkaTableSink} that serializes data in JSON format.
+ *
+ * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
+ *             with descriptors for schema and format instead. Descriptors allow for
+ *             implementation-agnostic definition of tables. See also
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
-@PublicEvolving
+@Deprecated
 public class Kafka08JsonTableSink extends KafkaJsonTableSink {
 
 	/**
@@ -48,7 +54,9 @@
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public Kafka08JsonTableSink(String topic, Properties properties) {
 		super(topic, properties, new FlinkFixedPartitioner<>());
 	}
@@ -60,7 +68,9 @@ public Kafka08JsonTableSink(String topic, Properties properties) {
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
 	 * @param partitioner Kafka partitioner
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public Kafka08JsonTableSink(String topic, Properties properties, FlinkKafkaPartitioner<Row> partitioner) {
 		super(topic, properties, partitioner);
 	}
@@ -83,13 +93,24 @@ public Kafka08JsonTableSink(String topic, Properties properties, KafkaPartitione
 	}
 
 	@Override
-	protected FlinkKafkaProducerBase<Row> createKafkaProducer(String topic, Properties properties, SerializationSchema<Row> serializationSchema, FlinkKafkaPartitioner<Row> partitioner) {
-		return new FlinkKafkaProducer08<>(topic, serializationSchema, properties, partitioner);
+	protected FlinkKafkaProducerBase<Row> createKafkaProducer(
+			String topic,
+			Properties properties,
+			SerializationSchema<Row> serializationSchema,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner) {
+		return new FlinkKafkaProducer08<>(
+			topic,
+			serializationSchema,
+			properties,
+			partitioner.orElse(new FlinkFixedPartitioner<>()));
 	}
 
 	@Override
 	protected Kafka08JsonTableSink createCopy() {
-		return new Kafka08JsonTableSink(topic, properties, partitioner);
+		return new Kafka08JsonTableSink(
+			topic,
+			properties,
+			partitioner.orElse(new FlinkFixedPartitioner<>()));
 	}
 }
 
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
index acb5783c262..dc5a077a89c 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSource.java
@@ -36,7 +36,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka08JsonTableSource extends KafkaJsonTableSource {
@@ -120,7 +120,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Kafka08JsonTableSource.Builder builder() {
@@ -133,7 +133,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaJsonTableSource.Builder<Kafka08JsonTableSource, Kafka08JsonTableSource.Builder> {
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSink.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSink.java
new file mode 100644
index 00000000000..146cfc90739
--- /dev/null
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSink.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.types.Row;
+
+import java.util.Optional;
+import java.util.Properties;
+
+/**
+ * Kafka 0.8 table sink for writing data into Kafka.
+ */
+@Internal
+public class Kafka08TableSink extends KafkaTableSink {
+
+	public Kafka08TableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+		super(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
+
+	@Override
+	protected FlinkKafkaProducerBase<Row> createKafkaProducer(
+			String topic,
+			Properties properties,
+			SerializationSchema<Row> serializationSchema,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner) {
+		return new FlinkKafkaProducer08<>(
+			topic,
+			serializationSchema,
+			properties,
+			partitioner.orElse(null));
+	}
+}
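
[Editor's sketch] For orientation, the new sink can be instantiated directly as below; in practice the factory further down does this wiring. The constructor signature is taken from the class above, while the property values, field names and the class name Kafka08TableSinkSketch are illustrative only:

import java.nio.charset.StandardCharsets;
import java.util.Optional;
import java.util.Properties;

import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.connectors.kafka.Kafka08TableSink;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.types.Row;

public class Kafka08TableSinkSketch {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092");

		TableSchema schema = new TableSchema(
			new String[]{"user", "cnt"},
			new TypeInformation<?>[]{Types.STRING, Types.LONG});

		// Any SerializationSchema<Row> works; a trivial lambda keeps the sketch self-contained.
		SerializationSchema<Row> serde = row -> row.toString().getBytes(StandardCharsets.UTF_8);

		// Optional.empty() means no Flink-side partitioner is set; the producer then
		// falls back to Kafka's own partitioning (see partitioner.orElse(null) above).
		Kafka08TableSink sink = new Kafka08TableSink(
			schema,
			"example-topic",
			props,
			Optional.empty(),
			serde);
	}
}
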
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
index 1a025b8a244..97c293e0690 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSource.java
@@ -58,7 +58,8 @@ public Kafka08TableSource(
 			Optional<String> proctimeAttribute,
 			List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
 			Optional<Map<String, String>> fieldMapping,
-			String topic, Properties properties,
+			String topic,
+			Properties properties,
 			DeserializationSchema<Row> deserializationSchema,
 			StartupMode startupMode,
 			Map<KafkaTopicPartition, Long> specificStartupOffsets) {
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceFactory.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceSinkFactory.java
similarity index 79%
rename from flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceFactory.java
rename to flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceSinkFactory.java
index cd337515614..aeccd4f1ac3 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceFactory.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceSinkFactory.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -34,7 +36,7 @@
 /**
  * Factory for creating configured instances of {@link Kafka08TableSource}.
  */
-public class Kafka08TableSourceFactory extends KafkaTableSourceFactory {
+public class Kafka08TableSourceSinkFactory extends KafkaTableSourceSinkFactoryBase {
 
 	@Override
 	protected String kafkaVersion() {
@@ -69,4 +71,20 @@ protected KafkaTableSource createKafkaTableSource(
 			startupMode,
 			specificStartupOffsets);
 	}
+
+	@Override
+	protected KafkaTableSink createKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka08TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory b/flink-connectors/flink-connector-kafka-0.8/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
index b83bb3ffc5a..f2e1c3f5e60 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.streaming.connectors.kafka.Kafka08TableSourceFactory
+org.apache.flink.streaming.connectors.kafka.Kafka08TableSourceSinkFactory
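
[Editor's sketch] These META-INF/services updates matter because table factories are discovered through Java's standard ServiceLoader mechanism; a renamed factory class that is not re-registered here would simply never be found. A small illustration of that discovery, using only the JDK mechanism that Flink's TableFactoryService builds on (the class name FactoryDiscoverySketch is illustrative):

import java.util.ServiceLoader;

import org.apache.flink.table.factories.TableFactory;

public class FactoryDiscoverySketch {
	public static void main(String[] args) {
		// Prints every TableFactory registered via a
		// META-INF/services/org.apache.flink.table.factories.TableFactory file
		// on the classpath, including Kafka08TableSourceSinkFactory after this change.
		for (TableFactory factory : ServiceLoader.load(TableFactory.class)) {
			System.out.println(factory.getClass().getName());
		}
	}
}
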
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java
index 53da9f6ba49..fc46ad4c6ee 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08JsonTableSinkTest.java
@@ -27,7 +27,11 @@
 
 /**
  * Tests for the {@link Kafka08JsonTableSink}.
+ *
+ * @deprecated Ensures backwards compatibility with Flink 1.5. Can be removed once we
+ *             drop support for format-specific table sinks.
  */
+@Deprecated
 public class Kafka08JsonTableSinkTest extends KafkaTableSinkTestBase {
 
 	@Override
@@ -36,7 +40,10 @@ protected KafkaTableSink createTableSink(
 			Properties properties,
 			FlinkKafkaPartitioner<Row> partitioner) {
 
-		return new Kafka08JsonTableSink(topic, properties, partitioner);
+		return new Kafka08JsonTableSink(
+			topic,
+			properties,
+			partitioner);
 	}
 
 	@Override
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceFactoryTest.java b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceSinkFactoryTest.java
similarity index 73%
rename from flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceFactoryTest.java
rename to flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceSinkFactoryTest.java
index d939d885242..ff633ec0246 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceFactoryTest.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08TableSourceSinkFactoryTest.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,9 +34,10 @@
 import java.util.Properties;
 
 /**
- * Test for {@link Kafka08TableSource} created by {@link Kafka08TableSourceFactory}.
+ * Test for {@link Kafka08TableSource} and {@link Kafka08TableSink} created
+ * by {@link Kafka08TableSourceSinkFactory}.
  */
-public class Kafka08TableSourceFactoryTest extends KafkaTableSourceFactoryTestBase {
+public class Kafka08TableSourceSinkFactoryTest extends KafkaTableSourceSinkFactoryTestBase {
 
 	@Override
 	protected String getKafkaVersion() {
@@ -47,6 +50,11 @@ protected String getKafkaVersion() {
 		return (Class) FlinkKafkaConsumer08.class;
 	}
 
+	@Override
+	protected Class<?> getExpectedFlinkKafkaProducer() {
+		return FlinkKafkaProducer08.class;
+	}
+
 	@Override
 	protected KafkaTableSource getExpectedKafkaTableSource(
 			TableSchema schema,
@@ -71,4 +79,21 @@ protected KafkaTableSource getExpectedKafkaTableSource(
 			specificStartupOffsets
 		);
 	}
+
+	@Override
+	protected KafkaTableSink getExpectedKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka08TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema
+		);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.9/pom.xml b/flink-connectors/flink-connector-kafka-0.9/pom.xml
index 20d29916880..74ada66f228 100644
--- a/flink-connectors/flink-connector-kafka-0.9/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.9/pom.xml
@@ -191,12 +191,12 @@ under the License.
 	</dependencies>
 
 	<profiles>
+		<!-- Create SQL Client uber jars by default -->
 		<profile>
-			<!-- Create SQL Client uber jars for releases -->
-			<id>release</id>
+			<id>sql-jars</id>
 			<activation>
 				<property>
-					<name>release</name>
+					<name>!skipSqlJars</name>
 				</property>
 			</activation>
 			<build>
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
index cd4bad9e758..4352d7e400d 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09AvroTableSource.java
@@ -39,7 +39,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka09AvroTableSource extends KafkaAvroTableSource {
@@ -118,7 +118,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Builder builder() {
@@ -131,7 +131,7 @@ public static Builder builder() {
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaAvroTableSource.Builder<Kafka09AvroTableSource, Kafka09AvroTableSource.Builder> {
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
index 95ce4e6eff3..33634590061 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSink.java
@@ -18,20 +18,26 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.KafkaPartitioner;
+import org.apache.flink.table.descriptors.ConnectorDescriptor;
 import org.apache.flink.types.Row;
 
+import java.util.Optional;
 import java.util.Properties;
 
 /**
  * Kafka 0.9 {@link KafkaTableSink} that serializes data in JSON format.
+ *
+ * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
+ *             with descriptors for schema and format instead. Descriptors allow for
+ *             implementation-agnostic definition of tables. See also
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
-@PublicEvolving
+@Deprecated
 public class Kafka09JsonTableSink extends KafkaJsonTableSink {
 
 	/**
@@ -48,7 +54,9 @@
 	 *
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public Kafka09JsonTableSink(String topic, Properties properties) {
 		super(topic, properties, new FlinkFixedPartitioner<>());
 	}
@@ -60,7 +68,9 @@ public Kafka09JsonTableSink(String topic, Properties properties) {
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
 	 * @param partitioner Kafka partitioner
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public Kafka09JsonTableSink(String topic, Properties properties, FlinkKafkaPartitioner<Row> partitioner) {
 		super(topic, properties, partitioner);
 	}
@@ -83,12 +93,23 @@ public Kafka09JsonTableSink(String topic, Properties properties, KafkaPartitione
 	}
 
 	@Override
-	protected FlinkKafkaProducerBase<Row> createKafkaProducer(String topic, Properties properties, SerializationSchema<Row> serializationSchema, FlinkKafkaPartitioner<Row> partitioner) {
-		return new FlinkKafkaProducer09<>(topic, serializationSchema, properties, partitioner);
+	protected FlinkKafkaProducerBase<Row> createKafkaProducer(
+			String topic,
+			Properties properties,
+			SerializationSchema<Row> serializationSchema,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner) {
+		return new FlinkKafkaProducer09<>(
+			topic,
+			serializationSchema,
+			properties,
+			partitioner.orElse(new FlinkFixedPartitioner<>()));
 	}
 
 	@Override
 	protected Kafka09JsonTableSink createCopy() {
-		return new Kafka09JsonTableSink(topic, properties, partitioner);
+		return new Kafka09JsonTableSink(
+			topic,
+			properties,
+			partitioner.orElse(new FlinkFixedPartitioner<>()));
 	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
index ad4e0d89768..db1df3d889e 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSource.java
@@ -36,7 +36,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 public class Kafka09JsonTableSource extends KafkaJsonTableSource {
@@ -120,7 +120,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static Kafka09JsonTableSource.Builder builder() {
@@ -133,7 +133,7 @@ public void setRowtimeAttributeDescriptor(RowtimeAttributeDescriptor rowtimeAttr
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	public static class Builder extends KafkaJsonTableSource.Builder<Kafka09JsonTableSource, Kafka09JsonTableSource.Builder> {
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSink.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSink.java
new file mode 100644
index 00000000000..6e38aad1a39
--- /dev/null
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSink.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.types.Row;
+
+import java.util.Optional;
+import java.util.Properties;
+
+/**
+ * Kafka 0.9 table sink for writing data into Kafka.
+ */
+@Internal
+public class Kafka09TableSink extends KafkaTableSink {
+
+	public Kafka09TableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+		super(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
+
+	@Override
+	protected FlinkKafkaProducerBase<Row> createKafkaProducer(
+			String topic,
+			Properties properties,
+			SerializationSchema<Row> serializationSchema,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner) {
+		return new FlinkKafkaProducer09<>(
+			topic,
+			serializationSchema,
+			properties,
+			partitioner.orElse(null));
+	}
+}
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
index 18bc1c43513..8f9e799ec96 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSource.java
@@ -58,7 +58,8 @@ public Kafka09TableSource(
 			Optional<String> proctimeAttribute,
 			List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
 			Optional<Map<String, String>> fieldMapping,
-			String topic, Properties properties,
+			String topic,
+			Properties properties,
 			DeserializationSchema<Row> deserializationSchema,
 			StartupMode startupMode,
 			Map<KafkaTopicPartition, Long> specificStartupOffsets) {
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactory.java b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactory.java
similarity index 79%
rename from flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactory.java
rename to flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactory.java
index 14c52fd0f83..19f51508b93 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactory.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactory.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -34,7 +36,7 @@
 /**
  * Factory for creating configured instances of {@link Kafka09TableSource}.
  */
-public class Kafka09TableSourceFactory extends KafkaTableSourceFactory {
+public class Kafka09TableSourceSinkFactory extends KafkaTableSourceSinkFactoryBase {
 
 	@Override
 	protected String kafkaVersion() {
@@ -69,4 +71,20 @@ protected KafkaTableSource createKafkaTableSource(
 			startupMode,
 			specificStartupOffsets);
 	}
+
+	@Override
+	protected KafkaTableSink createKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka09TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory b/flink-connectors/flink-connector-kafka-0.9/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
index fb14ddb5055..26258736937 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
+++ b/flink-connectors/flink-connector-kafka-0.9/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.streaming.connectors.kafka.Kafka09TableSourceFactory
+org.apache.flink.streaming.connectors.kafka.Kafka09TableSourceSinkFactory
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java
index 610e048af2a..97b5c7d88a2 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09JsonTableSinkTest.java
@@ -27,7 +27,11 @@
 
 /**
  * Tests for the {@link Kafka09JsonTableSink}.
+ *
+ * @deprecated Ensures backwards compatibility with Flink 1.5. Can be removed once we
+ *             drop support for format-specific table sinks.
  */
+@Deprecated
 public class Kafka09JsonTableSinkTest extends KafkaTableSinkTestBase {
 
 	@Override
@@ -36,7 +40,10 @@ protected KafkaTableSink createTableSink(
 			Properties properties,
 			FlinkKafkaPartitioner<Row> partitioner) {
 
-		return new Kafka09JsonTableSink(topic, properties, partitioner);
+		return new Kafka09JsonTableSink(
+			topic,
+			properties,
+			partitioner);
 	}
 
 	@Override
diff --git a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
similarity index 73%
rename from flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
rename to flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
index b976e14e8cd..d54c3945949 100644
--- a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
+++ b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
@@ -19,8 +19,10 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
@@ -32,9 +34,10 @@
 import java.util.Properties;
 
 /**
- * Test for {@link Kafka09TableSource} created by {@link Kafka09TableSourceFactory}.
+ * Test for {@link Kafka09TableSource} and {@link Kafka09TableSink} created
+ * by {@link Kafka09TableSourceSinkFactory}.
  */
-public class Kafka09TableSourceFactoryTest extends KafkaTableSourceFactoryTestBase {
+public class Kafka09TableSourceSinkFactoryTest extends KafkaTableSourceSinkFactoryTestBase {
 
 	@Override
 	protected String getKafkaVersion() {
@@ -47,6 +50,11 @@ protected String getKafkaVersion() {
 		return (Class) FlinkKafkaConsumer09.class;
 	}
 
+	@Override
+	protected Class<?> getExpectedFlinkKafkaProducer() {
+		return FlinkKafkaProducer09.class;
+	}
+
 	@Override
 	protected KafkaTableSource getExpectedKafkaTableSource(
 			TableSchema schema,
@@ -71,4 +79,21 @@ protected KafkaTableSource getExpectedKafkaTableSource(
 			specificStartupOffsets
 		);
 	}
+
+	@Override
+	protected KafkaTableSink getExpectedKafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+
+		return new Kafka09TableSink(
+			schema,
+			topic,
+			properties,
+			partitioner,
+			serializationSchema
+		);
+	}
 }
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
index 3e9f2b03e34..86fd21d15c6 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
@@ -40,7 +40,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 @Internal
@@ -56,7 +56,7 @@
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	protected KafkaAvroTableSource(
@@ -89,7 +89,7 @@ public String explainSource() {
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	protected abstract static class Builder<T extends KafkaAvroTableSource, B extends KafkaAvroTableSource.Builder>
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
index ec273989a55..231edddb311 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSink.java
@@ -29,7 +29,10 @@
 
 /**
  * Base class for {@link KafkaTableSink} that serializes data in JSON format.
+ *
+ * @deprecated Use table descriptors instead of implementation-specific classes.
  */
+@Deprecated
 @Internal
 public abstract class KafkaJsonTableSink extends KafkaTableSink {
 
@@ -39,7 +42,9 @@
 	 * @param topic topic in Kafka to which table is written
 	 * @param properties properties to connect to Kafka
 	 * @param partitioner Kafka partitioner
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public KafkaJsonTableSink(String topic, Properties properties, FlinkKafkaPartitioner<Row> partitioner) {
 		super(topic, properties, partitioner);
 	}
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
index bd0d0dedf22..70b286bc296 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSource.java
@@ -39,7 +39,7 @@
  * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
  *             with descriptors for schema and format instead. Descriptors allow for
  *             implementation-agnostic definition of tables. See also
- *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+ *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
  */
 @Deprecated
 @Internal
@@ -98,7 +98,7 @@ protected void setFailOnMissingField(boolean failOnMissingField) {
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	protected abstract static class Builder<T extends KafkaJsonTableSource, B extends KafkaJsonTableSource.Builder>
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
index 687df589cd0..a85d536eac9 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSink.java
@@ -23,44 +23,81 @@
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
 import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.functions.sink.SinkFunction;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
+import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.sinks.AppendStreamTableSink;
 import org.apache.flink.table.util.TableConnectorUtil;
 import org.apache.flink.types.Row;
 import org.apache.flink.util.Preconditions;
 
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.Optional;
 import java.util.Properties;
 
 /**
  * A version-agnostic Kafka {@link AppendStreamTableSink}.
  *
  * <p>The version-specific Kafka consumers need to extend this class and
- * override {@link #createKafkaProducer(String, Properties, SerializationSchema, FlinkKafkaPartitioner)}}.
+ * override {@link #createKafkaProducer(String, Properties, SerializationSchema, Optional)}}.
  */
 @Internal
 public abstract class KafkaTableSink implements AppendStreamTableSink<Row> {
 
+	// TODO make all attributes final and mandatory once we drop support for format-specific table sinks
+
+	/** The schema of the table. */
+	private final Optional<TableSchema> schema;
+
+	/** The Kafka topic to write to. */
 	protected final String topic;
+
+	/** Properties for the Kafka producer. */
 	protected final Properties properties;
-	protected SerializationSchema<Row> serializationSchema;
-	protected final FlinkKafkaPartitioner<Row> partitioner;
+
+	/** Serialization schema for encoding records to Kafka. */
+	protected Optional<SerializationSchema<Row>> serializationSchema;
+
+	/** Partitioner to select Kafka partition for each item. */
+	protected final Optional<FlinkKafkaPartitioner<Row>> partitioner;
+
+	// legacy variables
 	protected String[] fieldNames;
 	protected TypeInformation[] fieldTypes;
 
+	protected KafkaTableSink(
+			TableSchema schema,
+			String topic,
+			Properties properties,
+			Optional<FlinkKafkaPartitioner<Row>> partitioner,
+			SerializationSchema<Row> serializationSchema) {
+		this.schema = Optional.of(Preconditions.checkNotNull(schema, "Schema must not be null."));
+		this.topic = Preconditions.checkNotNull(topic, "Topic must not be null.");
+		this.properties = Preconditions.checkNotNull(properties, "Properties must not be null.");
+		this.partitioner = Preconditions.checkNotNull(partitioner, "Partitioner must not be null.");
+		this.serializationSchema = Optional.of(Preconditions.checkNotNull(
+			serializationSchema, "Serialization schema must not be null."));
+	}
+
 	/**
 	 * Creates KafkaTableSink.
 	 *
 	 * @param topic                 Kafka topic to write to.
-	 * @param properties            Properties for the Kafka consumer.
+	 * @param properties            Properties for the Kafka producer.
 	 * @param partitioner           Partitioner to select Kafka partition for each item
+	 * @deprecated Use table descriptors instead of implementation-specific classes.
 	 */
+	@Deprecated
 	public KafkaTableSink(
 			String topic,
 			Properties properties,
 			FlinkKafkaPartitioner<Row> partitioner) {
+		this.schema = Optional.empty();
 		this.topic = Preconditions.checkNotNull(topic, "topic");
 		this.properties = Preconditions.checkNotNull(properties, "properties");
-		this.partitioner = Preconditions.checkNotNull(partitioner, "partitioner");
+		this.partitioner = Optional.of(Preconditions.checkNotNull(partitioner, "partitioner"));
+		this.serializationSchema = Optional.empty();
 	}
 
 	/**
@@ -72,50 +109,68 @@ public KafkaTableSink(
 	 * @param partitioner         Partitioner to select Kafka partition.
 	 * @return The version-specific Kafka producer
 	 */
-	protected abstract FlinkKafkaProducerBase<Row> createKafkaProducer(
-		String topic, Properties properties,
+	protected abstract SinkFunction<Row> createKafkaProducer(
+		String topic,
+		Properties properties,
 		SerializationSchema<Row> serializationSchema,
-		FlinkKafkaPartitioner<Row> partitioner);
+		Optional<FlinkKafkaPartitioner<Row>> partitioner);
 
 	/**
 	 * Create serialization schema for converting table rows into bytes.
 	 *
 	 * @param rowSchema the schema of the row to serialize.
 	 * @return Instance of serialization schema
+	 * @deprecated Use the constructor to pass a serialization schema instead.
 	 */
-	protected abstract SerializationSchema<Row> createSerializationSchema(RowTypeInfo rowSchema);
+	@Deprecated
+	protected SerializationSchema<Row> createSerializationSchema(RowTypeInfo rowSchema) {
+		throw new UnsupportedOperationException("This method only exists for backwards compatibility.");
+	}
 
 	/**
 	 * Create a deep copy of this sink.
 	 *
 	 * @return Deep copy of this sink
 	 */
-	protected abstract KafkaTableSink createCopy();
+	@Deprecated
+	protected KafkaTableSink createCopy() {
+		throw new UnsupportedOperationException("This method only exists for backwards compatibility.");
+	}
 
 	@Override
 	public void emitDataStream(DataStream<Row> dataStream) {
-		FlinkKafkaProducerBase<Row> kafkaProducer = createKafkaProducer(topic, properties, serializationSchema, partitioner);
-		// always enable flush on checkpoint to achieve at-least-once if query runs with checkpointing enabled.
-		kafkaProducer.setFlushOnCheckpoint(true);
+		SinkFunction<Row> kafkaProducer = createKafkaProducer(
+			topic,
+			properties,
+			serializationSchema.orElseThrow(() -> new IllegalStateException("No serialization schema defined.")),
+			partitioner);
 		dataStream.addSink(kafkaProducer).name(TableConnectorUtil.generateRuntimeName(this.getClass(), fieldNames));
 	}
 
 	@Override
 	public TypeInformation<Row> getOutputType() {
-		return new RowTypeInfo(getFieldTypes());
+		return schema
+			.map(TableSchema::toRowType)
+			.orElseGet(() -> new RowTypeInfo(getFieldTypes()));
 	}
 
 	public String[] getFieldNames() {
-		return fieldNames;
+		return schema.map(TableSchema::getColumnNames).orElse(fieldNames);
 	}
 
 	@Override
 	public TypeInformation<?>[] getFieldTypes() {
-		return fieldTypes;
+		return schema.map(TableSchema::getTypes).orElse(fieldTypes);
 	}
 
 	@Override
 	public KafkaTableSink configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
+		if (schema.isPresent()) {
+			// a fixed schema is defined so reconfiguration is not supported
+			throw new UnsupportedOperationException("Reconfiguration of this sink is not supported.");
+		}
+
+		// legacy code
 		KafkaTableSink copy = createCopy();
 		copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
 		copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
@@ -123,8 +178,39 @@ public KafkaTableSink configure(String[] fieldNames, TypeInformation<?>[] fieldT
 			"Number of provided field names and types does not match.");
 
 		RowTypeInfo rowSchema = new RowTypeInfo(fieldTypes, fieldNames);
-		copy.serializationSchema = createSerializationSchema(rowSchema);
+		copy.serializationSchema = Optional.of(createSerializationSchema(rowSchema));
 
 		return copy;
 	}
+
+	@Override
+	public boolean equals(Object o) {
+		if (this == o) {
+			return true;
+		}
+		if (o == null || getClass() != o.getClass()) {
+			return false;
+		}
+		KafkaTableSink that = (KafkaTableSink) o;
+		return Objects.equals(schema, that.schema) &&
+			Objects.equals(topic, that.topic) &&
+			Objects.equals(properties, that.properties) &&
+			Objects.equals(serializationSchema, that.serializationSchema) &&
+			Objects.equals(partitioner, that.partitioner) &&
+			Arrays.equals(fieldNames, that.fieldNames) &&
+			Arrays.equals(fieldTypes, that.fieldTypes);
+	}
+
+	@Override
+	public int hashCode() {
+		int result = Objects.hash(
+			schema,
+			topic,
+			properties,
+			serializationSchema,
+			partitioner);
+		result = 31 * result + Arrays.hashCode(fieldNames);
+		result = 31 * result + Arrays.hashCode(fieldTypes);
+		return result;
+	}
 }
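
[Editor's sketch] Two construction paths now coexist in this class: the new constructor fixes the TableSchema and SerializationSchema up front (so configure() throws), while the deprecated constructor keeps the old configure()/createCopy()/createSerializationSchema() flow alive for the format-specific sinks. A rough, hedged sketch of the legacy path, assuming the Kafka 0.8 classes from this patch; property values, field names and the class name LegacySinkPathSketch are illustrative:

import java.util.Properties;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.connectors.kafka.Kafka08JsonTableSink;
import org.apache.flink.streaming.connectors.kafka.KafkaTableSink;

public class LegacySinkPathSketch {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", "localhost:9092");

		// Deprecated path: no TableSchema is set, so configure() is still allowed.
		// It goes through createCopy() and createSerializationSchema() exactly as
		// before this patch, with the results now stored in Optionals.
		KafkaTableSink legacySink = new Kafka08JsonTableSink("example-topic", props)
			.configure(
				new String[]{"user", "cnt"},
				new TypeInformation<?>[]{Types.STRING, Types.LONG});

		// Sinks built through the new constructor (see Kafka08TableSink above) carry
		// a fixed TableSchema instead, and configure() throws for them.
	}
}
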
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
index 78b373bc2c4..474c22f704f 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
@@ -406,7 +406,7 @@ protected void setFieldMapping(Map<String, String> fieldMapping) {
 	 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 	 *             with descriptors for schema and format instead. Descriptors allow for
 	 *             implementation-agnostic definition of tables. See also
-	 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+	 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 	 */
 	@Deprecated
 	protected abstract static class Builder<T extends KafkaTableSource, B extends KafkaTableSource.Builder> {
@@ -676,7 +676,7 @@ protected void configureTableSource(T tableSource) {
 		 * @deprecated Use the {@link org.apache.flink.table.descriptors.Kafka} descriptor together
 		 *             with descriptors for schema and format instead. Descriptors allow for
 		 *             implementation-agnostic definition of tables. See also
-		 *             {@link org.apache.flink.table.api.TableEnvironment#from(ConnectorDescriptor)}.
+		 *             {@link org.apache.flink.table.api.TableEnvironment#connect(ConnectorDescriptor)}.
 		 */
 		@Deprecated
 		protected abstract B builder();
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceFactory.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
similarity index 56%
rename from flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceFactory.java
rename to flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
index 380d657e0f8..5634331adbb 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceFactory.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
@@ -19,19 +19,26 @@
 package org.apache.flink.streaming.connectors.kafka;
 
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableException;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.DescriptorProperties;
 import org.apache.flink.table.descriptors.KafkaValidator;
 import org.apache.flink.table.descriptors.SchemaValidator;
 import org.apache.flink.table.factories.DeserializationSchemaFactory;
+import org.apache.flink.table.factories.SerializationSchemaFactory;
+import org.apache.flink.table.factories.StreamTableSinkFactory;
 import org.apache.flink.table.factories.StreamTableSourceFactory;
 import org.apache.flink.table.factories.TableFactoryService;
+import org.apache.flink.table.sinks.StreamTableSink;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
 import org.apache.flink.table.sources.StreamTableSource;
 import org.apache.flink.types.Row;
+import org.apache.flink.util.InstantiationUtil;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -48,6 +55,11 @@
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_PROPERTIES;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_PROPERTIES_KEY;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_PROPERTIES_VALUE;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_CLASS;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_VALUE_FIXED;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SPECIFIC_OFFSETS;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SPECIFIC_OFFSETS_OFFSET;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SPECIFIC_OFFSETS_PARTITION;
@@ -67,15 +79,20 @@
 import static org.apache.flink.table.descriptors.SchemaValidator.SCHEMA_NAME;
 import static org.apache.flink.table.descriptors.SchemaValidator.SCHEMA_PROCTIME;
 import static org.apache.flink.table.descriptors.SchemaValidator.SCHEMA_TYPE;
+import static org.apache.flink.table.descriptors.StreamTableDescriptorValidator.UPDATE_MODE;
+import static org.apache.flink.table.descriptors.StreamTableDescriptorValidator.UPDATE_MODE_VALUE_APPEND;
 
 /**
  * Factory for creating configured instances of {@link KafkaTableSource}.
  */
-public abstract class KafkaTableSourceFactory implements StreamTableSourceFactory<Row> {
+public abstract class KafkaTableSourceSinkFactoryBase implements
+		StreamTableSourceFactory<Row>,
+		StreamTableSinkFactory<Row> {
 
 	@Override
 	public Map<String, String> requiredContext() {
 		Map<String, String> context = new HashMap<>();
+		context.put(UPDATE_MODE(), UPDATE_MODE_VALUE_APPEND()); // append mode
 		context.put(CONNECTOR_TYPE(), CONNECTOR_TYPE_VALUE_KAFKA); // kafka
 		context.put(CONNECTOR_VERSION(), kafkaVersion()); // version
 		context.put(CONNECTOR_PROPERTY_VERSION(), "1"); // backwards compatibility
@@ -94,6 +111,8 @@
 		properties.add(CONNECTOR_STARTUP_MODE);
 		properties.add(CONNECTOR_SPECIFIC_OFFSETS + ".#." + CONNECTOR_SPECIFIC_OFFSETS_PARTITION);
 		properties.add(CONNECTOR_SPECIFIC_OFFSETS + ".#." + CONNECTOR_SPECIFIC_OFFSETS_OFFSET);
+		properties.add(CONNECTOR_SINK_PARTITIONER);
+		properties.add(CONNECTOR_SINK_PARTITIONER_CLASS);
 
 		// schema
 		properties.add(SCHEMA() + ".#." + SCHEMA_TYPE());
@@ -119,89 +138,48 @@
 
 	@Override
 	public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
-		final DescriptorProperties params = new DescriptorProperties(true);
-		params.putProperties(properties);
+		final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
 
-		// validate
-		// allow Kafka timestamps to be used, watermarks can not be received from source
-		new SchemaValidator(true, supportsKafkaTimestamps(), false).validate(params);
-		new KafkaValidator().validate(params);
-
-		// deserialization schema using format discovery
-		final DeserializationSchemaFactory<?> formatFactory = TableFactoryService.find(
-			DeserializationSchemaFactory.class,
-			properties,
-			this.getClass().getClassLoader());
-		@SuppressWarnings("unchecked")
-		final DeserializationSchema<Row> deserializationSchema = (DeserializationSchema<Row>) formatFactory
-			.createDeserializationSchema(properties);
-
-		// schema
-		final TableSchema schema = params.getTableSchema(SCHEMA());
-
-		// proctime
-		final Optional<String> proctimeAttribute = SchemaValidator.deriveProctimeAttribute(params);
-
-		// rowtime
-		final List<RowtimeAttributeDescriptor> rowtimeAttributes = SchemaValidator.deriveRowtimeAttributes(params);
-
-		// field mapping
-		final Map<String, String> fieldMapping = SchemaValidator.deriveFieldMapping(params, Optional.of(schema));
-
-		// properties
-		final Properties kafkaProperties = new Properties();
-		final List<Map<String, String>> propsList = params.getFixedIndexedProperties(
-			CONNECTOR_PROPERTIES,
-			Arrays.asList(CONNECTOR_PROPERTIES_KEY, CONNECTOR_PROPERTIES_VALUE));
-		propsList.forEach(kv -> kafkaProperties.put(
-			params.getString(kv.get(CONNECTOR_PROPERTIES_KEY)),
-			params.getString(kv.get(CONNECTOR_PROPERTIES_VALUE))
-		));
-
-		// topic
-		final String topic = params.getString(CONNECTOR_TOPIC);
+		final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
+		final DeserializationSchema<Row> deserializationSchema = getDeserializationSchema(properties);
+		final StartupOptions startupOptions = getStartupOptions(descriptorProperties, topic);
 
-		// startup mode
-		final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
-		final StartupMode startupMode = params
-			.getOptionalString(CONNECTOR_STARTUP_MODE)
-			.map(modeString -> {
-				switch (modeString) {
-					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_EARLIEST:
-						return StartupMode.EARLIEST;
+		return createKafkaTableSource(
+			descriptorProperties.getTableSchema(SCHEMA()),
+			SchemaValidator.deriveProctimeAttribute(descriptorProperties),
+			SchemaValidator.deriveRowtimeAttributes(descriptorProperties),
+			SchemaValidator.deriveFieldMapping(
+				descriptorProperties,
+				Optional.of(deserializationSchema.getProducedType())),
+			topic,
+			getKafkaProperties(descriptorProperties),
+			deserializationSchema,
+			startupOptions.startupMode,
+			startupOptions.specificOffsets);
+	}
 
-					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_LATEST:
-						return StartupMode.LATEST;
+	@Override
+	public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
+		final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
 
-					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_GROUP_OFFSETS:
-						return StartupMode.GROUP_OFFSETS;
+		final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());
+		final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
+		final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
+		final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
+			SchemaValidator.deriveRowtimeAttributes(descriptorProperties);
 
-					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS:
-						final List<Map<String, String>> offsetList = params.getFixedIndexedProperties(
-							CONNECTOR_SPECIFIC_OFFSETS,
-							Arrays.asList(CONNECTOR_SPECIFIC_OFFSETS_PARTITION, CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
-						offsetList.forEach(kv -> {
-							final int partition = params.getInt(kv.get(CONNECTOR_SPECIFIC_OFFSETS_PARTITION));
-							final long offset = params.getLong(kv.get(CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
-							final KafkaTopicPartition topicPartition = new KafkaTopicPartition(topic, partition);
-							specificOffsets.put(topicPartition, offset);
-						});
-						return StartupMode.SPECIFIC_OFFSETS;
-					default:
-						throw new TableException("Unsupported startup mode. Validator should have checked that.");
-				}
-			}).orElse(StartupMode.GROUP_OFFSETS);
+		// see also FLINK-9870
+		if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
+				checkForCustomFieldMapping(descriptorProperties, schema)) {
+			throw new TableException("Time attributes and custom field mappings are not supported yet.");
+		}
 
-		return createKafkaTableSource(
+		return createKafkaTableSink(
 			schema,
-			proctimeAttribute,
-			rowtimeAttributes,
-			fieldMapping,
 			topic,
-			kafkaProperties,
-			deserializationSchema,
-			startupMode,
-			specificOffsets);
+			getKafkaProperties(descriptorProperties),
+			getFlinkKafkaPartitioner(descriptorProperties),
+			getSerializationSchema(properties));
 	}
 
 	// --------------------------------------------------------------------------------------------
@@ -240,9 +218,140 @@ protected abstract KafkaTableSource createKafkaTableSource(
 		Optional<String> proctimeAttribute,
 		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
 		Map<String, String> fieldMapping,
-		String topic, Properties properties,
+		String topic,
+		Properties properties,
 		DeserializationSchema<Row> deserializationSchema,
 		StartupMode startupMode,
 		Map<KafkaTopicPartition, Long> specificStartupOffsets);
 
+	/**
+	 * Constructs the version-specific Kafka table sink.
+	 *
+	 * @param schema               Schema of the table to write.
+	 * @param topic                Kafka topic to write to.
+	 * @param properties           Properties for the Kafka producer.
+	 * @param partitioner          Partitioner to select the Kafka partition for each item.
+	 * @param serializationSchema  Serialization schema to encode rows into Kafka messages.
+	 */
+	protected abstract KafkaTableSink createKafkaTableSink(
+		TableSchema schema,
+		String topic,
+		Properties properties,
+		Optional<FlinkKafkaPartitioner<Row>> partitioner,
+		SerializationSchema<Row> serializationSchema);
+
+	// --------------------------------------------------------------------------------------------
+	// Helper methods
+	// --------------------------------------------------------------------------------------------
+
+	private DescriptorProperties getValidatedProperties(Map<String, String> properties) {
+		final DescriptorProperties descriptorProperties = new DescriptorProperties(true);
+		descriptorProperties.putProperties(properties);
+
+		// allow Kafka timestamps to be used, watermarks cannot be received from the source
+		new SchemaValidator(true, supportsKafkaTimestamps(), false).validate(descriptorProperties);
+		new KafkaValidator().validate(descriptorProperties);
+
+		return descriptorProperties;
+	}
+
+	private DeserializationSchema<Row> getDeserializationSchema(Map<String, String> properties) {
+		@SuppressWarnings("unchecked")
+		final DeserializationSchemaFactory<Row> formatFactory = TableFactoryService.find(
+			DeserializationSchemaFactory.class,
+			properties,
+			this.getClass().getClassLoader());
+		return formatFactory.createDeserializationSchema(properties);
+	}
+
+	private SerializationSchema<Row> getSerializationSchema(Map<String, String> properties) {
+		@SuppressWarnings("unchecked")
+		final SerializationSchemaFactory<Row> formatFactory = TableFactoryService.find(
+			SerializationSchemaFactory.class,
+			properties,
+			this.getClass().getClassLoader());
+		return formatFactory.createSerializationSchema(properties);
+	}
+
+	private Properties getKafkaProperties(DescriptorProperties descriptorProperties) {
+		final Properties kafkaProperties = new Properties();
+		final List<Map<String, String>> propsList = descriptorProperties.getFixedIndexedProperties(
+			CONNECTOR_PROPERTIES,
+			Arrays.asList(CONNECTOR_PROPERTIES_KEY, CONNECTOR_PROPERTIES_VALUE));
+		propsList.forEach(kv -> kafkaProperties.put(
+			descriptorProperties.getString(kv.get(CONNECTOR_PROPERTIES_KEY)),
+			descriptorProperties.getString(kv.get(CONNECTOR_PROPERTIES_VALUE))
+		));
+		return kafkaProperties;
+	}
+
+	private StartupOptions getStartupOptions(
+			DescriptorProperties descriptorProperties,
+			String topic) {
+		final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
+		final StartupMode startupMode = descriptorProperties
+			.getOptionalString(CONNECTOR_STARTUP_MODE)
+			.map(modeString -> {
+				switch (modeString) {
+					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_EARLIEST:
+						return StartupMode.EARLIEST;
+
+					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_LATEST:
+						return StartupMode.LATEST;
+
+					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_GROUP_OFFSETS:
+						return StartupMode.GROUP_OFFSETS;
+
+					case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS:
+						final List<Map<String, String>> offsetList = descriptorProperties.getFixedIndexedProperties(
+							CONNECTOR_SPECIFIC_OFFSETS,
+							Arrays.asList(CONNECTOR_SPECIFIC_OFFSETS_PARTITION, CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
+						offsetList.forEach(kv -> {
+							final int partition = descriptorProperties.getInt(kv.get(CONNECTOR_SPECIFIC_OFFSETS_PARTITION));
+							final long offset = descriptorProperties.getLong(kv.get(CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
+							final KafkaTopicPartition topicPartition = new KafkaTopicPartition(topic, partition);
+							specificOffsets.put(topicPartition, offset);
+						});
+						return StartupMode.SPECIFIC_OFFSETS;
+					default:
+						throw new TableException("Unsupported startup mode. Validator should have checked that.");
+				}
+			}).orElse(StartupMode.GROUP_OFFSETS);
+		final StartupOptions options = new StartupOptions();
+		options.startupMode = startupMode;
+		options.specificOffsets = specificOffsets;
+		return options;
+	}
+
+	@SuppressWarnings("unchecked")
+	private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
+		return descriptorProperties
+			.getOptionalString(CONNECTOR_SINK_PARTITIONER)
+			.flatMap((String partitionerString) -> {
+				switch (partitionerString) {
+					case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
+						return Optional.of(new FlinkFixedPartitioner<>());
+					case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
+						return Optional.empty();
+					case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
+						final Class<? extends FlinkKafkaPartitioner> partitionerClass =
+							descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
+						return Optional.of(InstantiationUtil.instantiate(partitionerClass));
+					default:
+						throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
+				}
+			});
+	}
+
+	private boolean checkForCustomFieldMapping(DescriptorProperties descriptorProperties, TableSchema schema) {
+		final Map<String, String> fieldMapping = SchemaValidator.deriveFieldMapping(
+			descriptorProperties,
+			Optional.of(schema.toRowType())); // until FLINK-9870 is fixed we assume that the table schema is the output type
+		return fieldMapping.size() != schema.getColumnNames().length ||
+			!fieldMapping.entrySet().stream().allMatch(mapping -> mapping.getKey().equals(mapping.getValue()));
+	}
+
+	private static class StartupOptions {
+		private StartupMode startupMode;
+		private Map<KafkaTopicPartition, Long> specificOffsets;
+	}
 }
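
For orientation, a minimal sketch of how the renamed factory is meant to be exercised on the sink side. It mirrors the pattern of the updated KafkaTableSourceSinkFactoryTestBase further below and reuses the test utilities touched by this PR (TestTableDescriptor, TestTableFormat); the version, topic, field names and bootstrap address are placeholder values, not taken from this change.

    import org.apache.flink.table.api.Types;
    import org.apache.flink.table.descriptors.DescriptorProperties;
    import org.apache.flink.table.descriptors.Kafka;
    import org.apache.flink.table.descriptors.Schema;
    import org.apache.flink.table.descriptors.TestTableDescriptor;
    import org.apache.flink.table.factories.StreamTableSinkFactory;
    import org.apache.flink.table.factories.TableFactoryService;
    import org.apache.flink.table.factories.utils.TestTableFormat;
    import org.apache.flink.table.sinks.TableSink;

    import java.util.Map;
    import java.util.Properties;

    public static TableSink<?> createExampleKafkaSink() {
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "localhost:9092"); // placeholder

        // Describe the table: connector + format + schema + update mode.
        TestTableDescriptor testDesc = new TestTableDescriptor(
                new Kafka()
                    .version("0.11")
                    .topic("myTopic")
                    .properties(kafkaProps)
                    .sinkPartitionerFixed())
            .withFormat(new TestTableFormat())
            .withSchema(new Schema()
                .field("fruit-name", Types.STRING())
                .field("count", Types.DECIMAL()))
            .inAppendMode();

        // Flatten the descriptor into string properties; the factory is located via
        // its requiredContext() (kafka + version + append mode) and builds the sink
        // from that very same property map.
        Map<String, String> propertiesMap = DescriptorProperties.toJavaMap(testDesc);
        return TableFactoryService
            .find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    }
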
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
index 6e83ddd32ea..7e0d1fbe456 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/partitioner/FlinkFixedPartitioner.java
@@ -74,4 +74,14 @@ public int partition(T record, byte[] key, byte[] value, String targetTopic, int
 
 		return partitions[parallelInstanceId % partitions.length];
 	}
+
+	@Override
+	public boolean equals(Object o) {
+		return this == o || o instanceof FlinkFixedPartitioner;
+	}
+
+	@Override
+	public int hashCode() {
+		return FlinkFixedPartitioner.class.hashCode();
+	}
 }
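
Because the factory instantiates a custom partitioner reflectively (see getFlinkKafkaPartitioner and InstantiationUtil above), such a class needs a public no-argument constructor; giving it equals()/hashCode(), like the ones added to FlinkFixedPartitioner here, also keeps factory-created sinks comparable in tests. A hypothetical example, not part of this PR — the key-hashing strategy is purely illustrative:

    import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;

    import java.util.Arrays;

    /** Hypothetical custom partitioner: routes each record by the hash of its Kafka message key. */
    public class KeyHashPartitioner<T> extends FlinkKafkaPartitioner<T> {

        @Override
        public int partition(T record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            if (key == null) {
                // records without a key all go to the first known partition
                return partitions[0];
            }
            // non-negative hash of the serialized key, mapped onto the available partitions
            int hash = Arrays.hashCode(key) & 0x7FFFFFFF;
            return partitions[hash % partitions.length];
        }

        @Override
        public boolean equals(Object o) {
            return this == o || o instanceof KeyHashPartitioner;
        }

        @Override
        public int hashCode() {
            return KeyHashPartitioner.class.hashCode();
        }
    }
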
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/Kafka.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/Kafka.java
index 45359587c1c..e44341a991e 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/Kafka.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/Kafka.java
@@ -20,6 +20,7 @@
 
 import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.util.Preconditions;
 
 import java.util.ArrayList;
@@ -34,6 +35,11 @@
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_PROPERTIES;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_PROPERTIES_KEY;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_PROPERTIES_VALUE;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_CLASS;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_VALUE_FIXED;
+import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SPECIFIC_OFFSETS;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SPECIFIC_OFFSETS_OFFSET;
 import static org.apache.flink.table.descriptors.KafkaValidator.CONNECTOR_SPECIFIC_OFFSETS_PARTITION;
@@ -51,6 +57,8 @@
 	private StartupMode startupMode;
 	private Map<Integer, Long> specificOffsets;
 	private Map<String, String> kafkaProperties;
+	private String sinkPartitionerType;
+	private Class<? extends FlinkKafkaPartitioner> sinkPartitionerClass;
 
 	/**
 	 * Connector descriptor for the Apache Kafka message queue.
@@ -175,6 +183,69 @@ public Kafka startFromSpecificOffset(int partition, long specificOffset) {
 		return this;
 	}
 
+	/**
+	 * Configures how to partition records from Flink's partitions into Kafka's partitions.
+	 *
+	 * <p>This strategy ensures that each Flink partition ends up in one Kafka partition.
+	 *
+	 * <p>Note: One Kafka partition can contain multiple Flink partitions. Examples:
+	 *
+	 * <p>More Flink partitions than Kafka partitions. Some (or all) Kafka partitions contain
+	 * the output of more than one Flink partition:
+	 * <pre>
+	 *     Flink Sinks            Kafka Partitions
+	 *         1    ----------------&gt;    1
+	 *         2    --------------/
+	 *         3    -------------/
+	 *         4    ------------/
+	 * </pre>
+	 *
+	 * <p>Fewer Flink partitions than Kafka partitions:
+	 * <pre>
+	 *     Flink Sinks            Kafka Partitions
+	 *         1    ----------------&gt;    1
+	 *         2    ----------------&gt;    2
+	 *                                      3
+	 *                                      4
+	 *                                      5
+	 * </pre>
+	 *
+	 * @see org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner
+	 */
+	public Kafka sinkPartitionerFixed() {
+		sinkPartitionerType = CONNECTOR_SINK_PARTITIONER_VALUE_FIXED;
+		sinkPartitionerClass = null;
+		return this;
+	}
+
+	/**
+	 * Configures how to partition records from Flink's partitions into Kafka's partitions.
+	 *
+	 * <p>This strategy ensures that records will be distributed to Kafka partitions in a
+	 * round-robin fashion.
+	 *
+	 * <p>Note: This strategy is useful to avoid unbalanced partitioning. However, it will
+	 * open many network connections between all the Flink instances and all the Kafka brokers.
+	 */
+	public Kafka sinkPartitionerRoundRobin() {
+		sinkPartitionerType = CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN;
+		sinkPartitionerClass = null;
+		return this;
+	}
+
+	/**
+	 * Configures how to partition records from Flink's partitions into Kafka's partitions.
+	 *
+	 * <p>This strategy allows for a custom partitioner by providing an implementation
+	 * of {@link FlinkKafkaPartitioner}.
+	 */
+	public Kafka sinkPartitionerCustom(Class<? extends FlinkKafkaPartitioner> partitionerClass) {
+		sinkPartitionerType = CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM;
+		sinkPartitionerClass = Preconditions.checkNotNull(partitionerClass);
+		return this;
+	}
+
 	/**
 	 * Internal method for connector properties conversion.
 	 */
@@ -212,5 +283,12 @@ public void addConnectorProperties(DescriptorProperties properties) {
 					.collect(Collectors.toList())
 				);
 		}
+
+		if (sinkPartitionerType != null) {
+			properties.putString(CONNECTOR_SINK_PARTITIONER, sinkPartitionerType);
+			if (sinkPartitionerClass != null) {
+				properties.putClass(CONNECTOR_SINK_PARTITIONER_CLASS, sinkPartitionerClass);
+			}
+		}
 	}
 }
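
A short fragment sketching the new fluent calls and the flat properties they translate into (the topic and the bootstrap address are placeholders; KeyHashPartitioner is the hypothetical class from the sketch further above). Note that the three sinkPartitioner* methods overwrite each other, so only the last call takes effect:

    Properties kafkaProps = new Properties();
    kafkaProps.setProperty("bootstrap.servers", "localhost:9092"); // placeholder

    Kafka kafka = new Kafka()
        .version("0.11")
        .topic("pageviews")                                // placeholder topic
        .properties(kafkaProps)
        .sinkPartitionerCustom(KeyHashPartitioner.class);  // hypothetical partitioner

    // addConnectorProperties(...) then emits, among the other connector keys:
    //   connector.sink-partitioner       = custom
    //   connector.sink-partitioner-class = <fully qualified name of KeyHashPartitioner>
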
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/KafkaValidator.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/KafkaValidator.java
index 3adc7c518a4..cad37f8f8cd 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/KafkaValidator.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/table/descriptors/KafkaValidator.java
@@ -48,12 +48,27 @@
 	public static final String CONNECTOR_PROPERTIES = "connector.properties";
 	public static final String CONNECTOR_PROPERTIES_KEY = "key";
 	public static final String CONNECTOR_PROPERTIES_VALUE = "value";
+	public static final String CONNECTOR_SINK_PARTITIONER = "connector.sink-partitioner";
+	public static final String CONNECTOR_SINK_PARTITIONER_VALUE_FIXED = "fixed";
+	public static final String CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN = "round-robin";
+	public static final String CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM = "custom";
+	public static final String CONNECTOR_SINK_PARTITIONER_CLASS = "connector.sink-partitioner-class";
 
 	@Override
 	public void validate(DescriptorProperties properties) {
 		super.validate(properties);
 		properties.validateValue(CONNECTOR_TYPE(), CONNECTOR_TYPE_VALUE_KAFKA, false);
 
+		validateVersion(properties);
+
+		validateStartupMode(properties);
+
+		validateKafkaProperties(properties);
+
+		validateSinkPartitioner(properties);
+	}
+
+	private void validateVersion(DescriptorProperties properties) {
 		final List<String> versions = Arrays.asList(
 			CONNECTOR_VERSION_VALUE_08,
 			CONNECTOR_VERSION_VALUE_09,
@@ -61,7 +76,9 @@ public void validate(DescriptorProperties properties) {
 			CONNECTOR_VERSION_VALUE_011);
 		properties.validateEnumValues(CONNECTOR_VERSION(), false, versions);
 		properties.validateString(CONNECTOR_TOPIC, false, 1, Integer.MAX_VALUE);
+	}
 
+	private void validateStartupMode(DescriptorProperties properties) {
 		final Map<String, Consumer<String>> specificOffsetValidators = new HashMap<>();
 		specificOffsetValidators.put(
 			CONNECTOR_SPECIFIC_OFFSETS_PARTITION,
@@ -86,17 +103,29 @@ public void validate(DescriptorProperties properties) {
 			CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS,
 			prefix -> properties.validateFixedIndexedProperties(CONNECTOR_SPECIFIC_OFFSETS, false, specificOffsetValidators));
 		properties.validateEnum(CONNECTOR_STARTUP_MODE, true, startupModeValidation);
+	}
 
+	private void validateKafkaProperties(DescriptorProperties properties) {
 		final Map<String, Consumer<String>> propertyValidators = new HashMap<>();
 		propertyValidators.put(
 			CONNECTOR_PROPERTIES_KEY,
-			prefix -> properties.validateString(prefix + CONNECTOR_PROPERTIES_KEY, false, 1, Integer.MAX_VALUE));
+			prefix -> properties.validateString(prefix + CONNECTOR_PROPERTIES_KEY, false, 1));
 		propertyValidators.put(
 			CONNECTOR_PROPERTIES_VALUE,
-			prefix -> properties.validateString(prefix + CONNECTOR_PROPERTIES_VALUE, false, 0, Integer.MAX_VALUE));
+			prefix -> properties.validateString(prefix + CONNECTOR_PROPERTIES_VALUE, false, 0));
 		properties.validateFixedIndexedProperties(CONNECTOR_PROPERTIES, true, propertyValidators);
 	}
 
+	private void validateSinkPartitioner(DescriptorProperties properties) {
+		final Map<String, Consumer<String>> sinkPartitionerValidators = new HashMap<>();
+		sinkPartitionerValidators.put(CONNECTOR_SINK_PARTITIONER_VALUE_FIXED, properties.noValidation());
+		sinkPartitionerValidators.put(CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN, properties.noValidation());
+		sinkPartitionerValidators.put(
+			CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM,
+			prefix -> properties.validateString(CONNECTOR_SINK_PARTITIONER_CLASS, false, 1));
+		properties.validateEnum(CONNECTOR_SINK_PARTITIONER, true, sinkPartitionerValidators);
+	}
+
 	// utilities
 
 	public static String normalizeStartupMode(StartupMode startupMode) {
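
In flat form, the partitioner keys checked by validateSinkPartitioner() come down to the following combinations, shown here as a fragment (connector type, version, topic, schema and format keys are omitted for brevity; the key names and values match the constants added above):

    // "connector.sink-partitioner" is optional; when present it must be one of:
    //   "fixed"       -> FlinkFixedPartitioner (each Flink sink task writes to one Kafka partition)
    //   "round-robin" -> no Flink partitioner, records are spread in a round-robin fashion
    //   "custom"      -> additionally requires a non-empty partitioner class, as below
    Map<String, String> sinkPartitionerProps = new HashMap<>();
    sinkPartitionerProps.put("connector.sink-partitioner", "custom");
    sinkPartitionerProps.put("connector.sink-partitioner-class",
        "org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner");
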
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java
index 768ac16547c..6776cd5eb22 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java
@@ -93,7 +93,7 @@
 
 	@Parameterized.Parameters(name = "Migration Savepoint: {0}")
 	public static Collection<MigrationVersion> parameters () {
-		return Arrays.asList(MigrationVersion.v1_2, MigrationVersion.v1_3, MigrationVersion.v1_4);
+		return Arrays.asList(MigrationVersion.v1_2, MigrationVersion.v1_3, MigrationVersion.v1_4, MigrationVersion.v1_5);
 	}
 
 	public FlinkKafkaConsumerBaseMigrationTest(MigrationVersion testMigrateVersion) {
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSourceFactoryTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSourceFactoryTestBase.java
index 51017f415d8..51c0e7bbde2 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSourceFactoryTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSourceFactoryTestBase.java
@@ -28,10 +28,11 @@
 import org.apache.flink.table.descriptors.Kafka;
 import org.apache.flink.table.descriptors.Rowtime;
 import org.apache.flink.table.descriptors.Schema;
-import org.apache.flink.table.descriptors.TestTableSourceDescriptor;
+import org.apache.flink.table.descriptors.TestTableDescriptor;
 import org.apache.flink.table.factories.StreamTableSourceFactory;
 import org.apache.flink.table.factories.TableFactoryService;
 import org.apache.flink.table.sources.TableSource;
+import org.apache.flink.table.sources.TableSourceUtil;
 import org.apache.flink.table.sources.tsextractors.ExistingField;
 import org.apache.flink.table.sources.wmstrategies.AscendingTimestamps;
 
@@ -101,7 +102,9 @@ private void testTableSource(FormatDescriptor format) {
 
 		final Map<String, String> tableJsonMapping = new HashMap<>();
 		tableJsonMapping.put("fruit-name", "name");
+		tableJsonMapping.put("name", "name");
 		tableJsonMapping.put("count", "count");
+		tableJsonMapping.put("time", "time");
 
 		final Properties props = new Properties();
 		props.put("group.id", "test-group");
@@ -129,26 +132,29 @@ private void testTableSource(FormatDescriptor format) {
 				.withRowtimeAttribute("event-time", new ExistingField("time"), new AscendingTimestamps())
 				.build();
 
+		TableSourceUtil.validateTableSource(builderSource);
+
 		// construct table source using descriptors and table source factory
 
 		final Map<Integer, Long> offsets = new HashMap<>();
 		offsets.put(0, 100L);
 		offsets.put(1, 123L);
 
-		final TestTableSourceDescriptor testDesc = new TestTableSourceDescriptor(
+		final TestTableDescriptor testDesc = new TestTableDescriptor(
 				new Kafka()
 					.version(version())
 					.topic(TOPIC)
 					.properties(props)
 					.startFromSpecificOffsets(offsets))
-			.addFormat(format)
-			.addSchema(
+			.withFormat(format)
+			.withSchema(
 				new Schema()
 						.field("fruit-name", Types.STRING).from("name")
 						.field("count", Types.BIG_DEC) // no from so it must match with the input
 						.field("event-time", Types.SQL_TIMESTAMP).rowtime(
 							new Rowtime().timestampsFromField("time").watermarksPeriodicAscending())
-						.field("proc-time", Types.SQL_TIMESTAMP).proctime());
+						.field("proc-time", Types.SQL_TIMESTAMP).proctime())
+			.inAppendMode();
 
 		DescriptorProperties properties = new DescriptorProperties(true);
 		testDesc.addProperties(properties);
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java
index a87c6228598..b4bb89dc048 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSinkTestBase.java
@@ -30,6 +30,7 @@
 
 import org.junit.Test;
 
+import java.util.Optional;
 import java.util.Properties;
 
 import static org.junit.Assert.assertArrayEquals;
@@ -44,7 +45,11 @@
 
 /**
  * Abstract test base for all Kafka table sink tests.
+ *
+ * @deprecated Ensures backwards compatibility with Flink 1.5. Can be removed once we
+ *             drop support for format-specific table sinks.
  */
+@Deprecated
 public abstract class KafkaTableSinkTestBase {
 
 	private static final String TOPIC = "testTopic";
@@ -55,7 +60,7 @@
 
 	@SuppressWarnings("unchecked")
 	@Test
-	public void testKafkaTableSink() throws Exception {
+	public void testKafkaTableSink() {
 		DataStream dataStream = mock(DataStream.class);
 		when(dataStream.addSink(any(SinkFunction.class))).thenReturn(mock(DataStreamSink.class));
 
@@ -70,7 +75,7 @@ public void testKafkaTableSink() throws Exception {
 			eq(TOPIC),
 			eq(PROPERTIES),
 			any(getSerializationSchemaClass()),
-			eq(PARTITIONER));
+			eq(Optional.of(PARTITIONER)));
 	}
 
 	@Test
@@ -94,7 +99,8 @@ protected abstract KafkaTableSink createTableSink(
 	protected abstract Class<? extends FlinkKafkaProducerBase> getProducerClass();
 
 	private KafkaTableSink createTableSink() {
-		return createTableSink(TOPIC, PROPERTIES, PARTITIONER);
+		KafkaTableSink sink = createTableSink(TOPIC, PROPERTIES, PARTITIONER);
+		return sink.configure(FIELD_NAMES, FIELD_TYPES);
 	}
 
 	private static Properties createSinkProperties() {
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceFactoryTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryTestBase.java
similarity index 56%
rename from flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceFactoryTestBase.java
rename to flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryTestBase.java
index 1e8266d9025..5e9144c1e57 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceFactoryTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryTestBase.java
@@ -20,24 +20,37 @@
 
 import org.apache.flink.api.common.JobExecutionResult;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
 import org.apache.flink.streaming.api.datastream.DataStreamSource;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.api.functions.sink.SinkFunction;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.streaming.api.operators.ChainingStrategy;
+import org.apache.flink.streaming.api.transformations.StreamTransformation;
 import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
 import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.api.Types;
 import org.apache.flink.table.descriptors.DescriptorProperties;
 import org.apache.flink.table.descriptors.Kafka;
 import org.apache.flink.table.descriptors.Rowtime;
 import org.apache.flink.table.descriptors.Schema;
-import org.apache.flink.table.descriptors.TestTableSourceDescriptor;
+import org.apache.flink.table.descriptors.TestTableDescriptor;
+import org.apache.flink.table.factories.StreamTableSinkFactory;
 import org.apache.flink.table.factories.StreamTableSourceFactory;
 import org.apache.flink.table.factories.TableFactoryService;
 import org.apache.flink.table.factories.utils.TestDeserializationSchema;
+import org.apache.flink.table.factories.utils.TestSerializationSchema;
 import org.apache.flink.table.factories.utils.TestTableFormat;
+import org.apache.flink.table.sinks.TableSink;
 import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
 import org.apache.flink.table.sources.TableSource;
+import org.apache.flink.table.sources.TableSourceUtil;
 import org.apache.flink.table.sources.tsextractors.ExistingField;
 import org.apache.flink.table.sources.wmstrategies.AscendingTimestamps;
 import org.apache.flink.types.Row;
@@ -45,6 +58,7 @@
 
 import org.junit.Test;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -56,9 +70,9 @@
 import static org.junit.Assert.assertTrue;
 
 /**
- * Abstract test base for {@link KafkaTableSourceFactory}.
+ * Abstract test base for {@link KafkaTableSourceSinkFactoryBase}.
  */
-public abstract class KafkaTableSourceFactoryTestBase extends TestLogger {
+public abstract class KafkaTableSourceSinkFactoryTestBase extends TestLogger {
 
 	private static final String TOPIC = "myTopic";
 	private static final int PARTITION_0 = 0;
@@ -75,6 +89,13 @@
 	static {
 		KAFKA_PROPERTIES.setProperty("zookeeper.connect", "dummy");
 		KAFKA_PROPERTIES.setProperty("group.id", "dummy");
+		KAFKA_PROPERTIES.setProperty("bootstrap.servers", "dummy");
+	}
+
+	private static final Map<Integer, Long> OFFSETS = new HashMap<>();
+	static {
+		OFFSETS.put(PARTITION_0, OFFSET_0);
+		OFFSETS.put(PARTITION_1, OFFSET_1);
 	}
 
 	@Test
@@ -95,7 +116,9 @@ public void testTableSource() {
 
 		final Map<String, String> fieldMapping = new HashMap<>();
 		fieldMapping.put(FRUIT_NAME, NAME);
+		fieldMapping.put(NAME, NAME);
 		fieldMapping.put(COUNT, COUNT);
+		fieldMapping.put(TIME, TIME);
 
 		final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
 		specificOffsets.put(new KafkaTopicPartition(TOPIC, PARTITION_0), OFFSET_0);
@@ -110,8 +133,6 @@ public void testTableSource() {
 				.toRowType()
 		);
 
-		final StartupMode startupMode = StartupMode.SPECIFIC_OFFSETS;
-
 		final KafkaTableSource expected = getExpectedKafkaTableSource(
 			schema,
 			Optional.of(PROC_TIME),
@@ -120,34 +141,32 @@ public void testTableSource() {
 			TOPIC,
 			KAFKA_PROPERTIES,
 			deserializationSchema,
-			startupMode,
+			StartupMode.SPECIFIC_OFFSETS,
 			specificOffsets);
 
-		// construct table source using descriptors and table source factory
+		TableSourceUtil.validateTableSource(expected);
 
-		final Map<Integer, Long> offsets = new HashMap<>();
-		offsets.put(PARTITION_0, OFFSET_0);
-		offsets.put(PARTITION_1, OFFSET_1);
+		// construct table source using descriptors and table source factory
 
-		final TestTableSourceDescriptor testDesc = new TestTableSourceDescriptor(
+		final TestTableDescriptor testDesc = new TestTableDescriptor(
 				new Kafka()
 					.version(getKafkaVersion())
 					.topic(TOPIC)
 					.properties(KAFKA_PROPERTIES)
-					.startFromSpecificOffsets(offsets))
-			.addFormat(new TestTableFormat())
-			.addSchema(
+					.sinkPartitionerRoundRobin() // test that this is accepted although not needed
+					.startFromSpecificOffsets(OFFSETS))
+			.withFormat(new TestTableFormat())
+			.withSchema(
 				new Schema()
 					.field(FRUIT_NAME, Types.STRING()).from(NAME)
 					.field(COUNT, Types.DECIMAL()) // no from so it must match with the input
 					.field(EVENT_TIME, Types.SQL_TIMESTAMP()).rowtime(
 						new Rowtime().timestampsFromField(TIME).watermarksPeriodicAscending())
-					.field(PROC_TIME, Types.SQL_TIMESTAMP()).proctime());
-		final DescriptorProperties descriptorProperties = new DescriptorProperties(true);
-		testDesc.addProperties(descriptorProperties);
-		final Map<String, String> propertiesMap = descriptorProperties.asMap();
+					.field(PROC_TIME, Types.SQL_TIMESTAMP()).proctime())
+			.inAppendMode();
 
-		final TableSource<?> actualSource = TableFactoryService.find(StreamTableSourceFactory.class, testDesc)
+		final Map<String, String> propertiesMap = DescriptorProperties.toJavaMap(testDesc);
+		final TableSource<?> actualSource = TableFactoryService.find(StreamTableSourceFactory.class, propertiesMap)
 			.createStreamTableSource(propertiesMap);
 
 		assertEquals(expected, actualSource);
@@ -156,17 +175,67 @@ public void testTableSource() {
 		final KafkaTableSource actualKafkaSource = (KafkaTableSource) actualSource;
 		final StreamExecutionEnvironmentMock mock = new StreamExecutionEnvironmentMock();
 		actualKafkaSource.getDataStream(mock);
-		assertTrue(getExpectedFlinkKafkaConsumer().isAssignableFrom(mock.function.getClass()));
+		assertTrue(getExpectedFlinkKafkaConsumer().isAssignableFrom(mock.sourceFunction.getClass()));
+	}
+
+	/**
+	 * This test can be unified with the corresponding source test once we have fixed FLINK-9870.
+	 */
+	@Test
+	public void testTableSink() {
+		// prepare parameters for Kafka table sink
+
+		final TableSchema schema = TableSchema.builder()
+			.field(FRUIT_NAME, Types.STRING())
+			.field(COUNT, Types.DECIMAL())
+			.field(EVENT_TIME, Types.SQL_TIMESTAMP())
+			.build();
+
+		final KafkaTableSink expected = getExpectedKafkaTableSink(
+			schema,
+			TOPIC,
+			KAFKA_PROPERTIES,
+			Optional.of(new FlinkFixedPartitioner<>()),
+			new TestSerializationSchema(schema.toRowType()));
+
+		// construct table sink using descriptors and table sink factory
+
+		final TestTableDescriptor testDesc = new TestTableDescriptor(
+				new Kafka()
+					.version(getKafkaVersion())
+					.topic(TOPIC)
+					.properties(KAFKA_PROPERTIES)
+					.sinkPartitionerFixed()
+					.startFromSpecificOffsets(OFFSETS)) // test that these are accepted although not needed
+			.withFormat(new TestTableFormat())
+			.withSchema(
+				new Schema()
+					.field(FRUIT_NAME, Types.STRING())
+					.field(COUNT, Types.DECIMAL())
+					.field(EVENT_TIME, Types.SQL_TIMESTAMP()))
+			.inAppendMode();
+
+		final Map<String, String> propertiesMap = DescriptorProperties.toJavaMap(testDesc);
+		final TableSink<?> actualSink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
+			.createStreamTableSink(propertiesMap);
+
+		assertEquals(expected, actualSink);
+
+		// test Kafka producer
+		final KafkaTableSink actualKafkaSink = (KafkaTableSink) actualSink;
+		final DataStreamMock streamMock = new DataStreamMock(new StreamExecutionEnvironmentMock(), schema.toRowType());
+		actualKafkaSink.emitDataStream(streamMock);
+		assertTrue(getExpectedFlinkKafkaProducer().isAssignableFrom(streamMock.sinkFunction.getClass()));
 	}
 
 	private static class StreamExecutionEnvironmentMock extends StreamExecutionEnvironment {
 
-		public SourceFunction<?> function;
+		public SourceFunction<?> sourceFunction;
 
 		@Override
-		public <OUT> DataStreamSource<OUT> addSource(SourceFunction<OUT> function) {
-			this.function = function;
-			return super.addSource(function);
+		public <OUT> DataStreamSource<OUT> addSource(SourceFunction<OUT> sourceFunction) {
+			this.sourceFunction = sourceFunction;
+			return super.addSource(sourceFunction);
 		}
 
 		@Override
@@ -175,6 +244,38 @@ public JobExecutionResult execute(String jobName) {
 		}
 	}
 
+	private static class DataStreamMock extends DataStream<Row> {
+
+		public SinkFunction<?> sinkFunction;
+
+		public DataStreamMock(StreamExecutionEnvironment environment, TypeInformation<Row> outType) {
+			super(environment, new StreamTransformationMock("name", outType, 1));
+		}
+
+		@Override
+		public DataStreamSink<Row> addSink(SinkFunction<Row> sinkFunction) {
+			this.sinkFunction = sinkFunction;
+			return super.addSink(sinkFunction);
+		}
+	}
+
+	private static class StreamTransformationMock extends StreamTransformation<Row> {
+
+		public StreamTransformationMock(String name, TypeInformation<Row> outputType, int parallelism) {
+			super(name, outputType, parallelism);
+		}
+
+		@Override
+		public void setChainingStrategy(ChainingStrategy strategy) {
+			// do nothing
+		}
+
+		@Override
+		public Collection<StreamTransformation<?>> getTransitivePredecessors() {
+			return null;
+		}
+	}
+
 	// --------------------------------------------------------------------------------------------
 	// For version-specific tests
 	// --------------------------------------------------------------------------------------------
@@ -183,13 +284,23 @@ public JobExecutionResult execute(String jobName) {
 
 	protected abstract Class<FlinkKafkaConsumerBase<Row>> getExpectedFlinkKafkaConsumer();
 
+	protected abstract Class<?> getExpectedFlinkKafkaProducer();
+
 	protected abstract KafkaTableSource getExpectedKafkaTableSource(
 		TableSchema schema,
 		Optional<String> proctimeAttribute,
 		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
 		Map<String, String> fieldMapping,
-		String topic, Properties properties,
+		String topic,
+		Properties properties,
 		DeserializationSchema<Row> deserializationSchema,
 		StartupMode startupMode,
 		Map<KafkaTopicPartition, Long> specificStartupOffsets);
+
+	protected abstract KafkaTableSink getExpectedKafkaTableSink(
+		TableSchema schema,
+		String topic,
+		Properties properties,
+		Optional<FlinkKafkaPartitioner<Row>> partitioner,
+		SerializationSchema<Row> serializationSchema);
 }
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/table/descriptors/KafkaTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/table/descriptors/KafkaTest.java
index f3d96f1c443..c67bc4dcf20 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/table/descriptors/KafkaTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/table/descriptors/KafkaTest.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.table.descriptors;
 
+import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
+
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -59,7 +61,8 @@
 				.version("0.11")
 				.topic("MyTable")
 				.startFromSpecificOffsets(offsets)
-				.properties(properties);
+				.properties(properties)
+				.sinkPartitionerCustom(FlinkFixedPartitioner.class);
 
 		return Arrays.asList(earliestDesc, specificOffsetsDesc, specificOffsetsMapDesc);
 	}
@@ -102,6 +105,8 @@
 		props3.put("connector.properties.0.value", "12");
 		props3.put("connector.properties.1.key", "kafka.stuff");
 		props3.put("connector.properties.1.value", "42");
+		props3.put("connector.sink-partitioner", "custom");
+		props3.put("connector.sink-partitioner-class", FlinkFixedPartitioner.class.getName());
 
 		return Arrays.asList(props1, props2, props3);
 	}
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/resources/kafka-consumer-migration-test-flink1.5-empty-state-snapshot b/flink-connectors/flink-connector-kafka-base/src/test/resources/kafka-consumer-migration-test-flink1.5-empty-state-snapshot
new file mode 100644
index 00000000000..affc8b9b756
Binary files /dev/null and b/flink-connectors/flink-connector-kafka-base/src/test/resources/kafka-consumer-migration-test-flink1.5-empty-state-snapshot differ
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/resources/kafka-consumer-migration-test-flink1.5-snapshot b/flink-connectors/flink-connector-kafka-base/src/test/resources/kafka-consumer-migration-test-flink1.5-snapshot
new file mode 100644
index 00000000000..773b363b859
Binary files /dev/null and b/flink-connectors/flink-connector-kafka-base/src/test/resources/kafka-consumer-migration-test-flink1.5-snapshot differ
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
index e46f79e9b73..443b19ec382 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
@@ -92,6 +92,9 @@ public SentinelSequenceNumber toSentinelSequenceNumber() {
 	/** The power constant for exponential backoff between each describeStream attempt. */
 	public static final String STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT = "flink.stream.describe.backoff.expconst";
 
+	/** The maximum number of listShards attempts if we get a recoverable exception. */
+	public static final String LIST_SHARDS_RETRIES = "flink.list.shards.maxretries";
+
 	/** The base backoff time between each listShards attempt. */
 	public static final String LIST_SHARDS_BACKOFF_BASE = "flink.list.shards.backoff.base";
 
@@ -104,7 +107,7 @@ public SentinelSequenceNumber toSentinelSequenceNumber() {
 	/** The maximum number of records to try to get each time we fetch records from an AWS Kinesis shard. */
 	public static final String SHARD_GETRECORDS_MAX = "flink.shard.getrecords.maxrecordcount";
 
-	/** The maximum number of getRecords attempts if we get ProvisionedThroughputExceededException. */
+	/** The maximum number of getRecords attempts if we get a recoverable exception. */
 	public static final String SHARD_GETRECORDS_RETRIES = "flink.shard.getrecords.maxretries";
 
 	/** The base backoff time between getRecords attempts if we get a ProvisionedThroughputExceededException. */
@@ -134,6 +137,10 @@ public SentinelSequenceNumber toSentinelSequenceNumber() {
 	/** The interval between each attempt to discover new shards. */
 	public static final String SHARD_DISCOVERY_INTERVAL_MILLIS = "flink.shard.discovery.intervalmillis";
 
+	/** The config to turn on adaptive reads from a shard. */
+	public static final String SHARD_USE_ADAPTIVE_READS = "flink.shard.adaptivereads";
+
 	// ------------------------------------------------------------------------
 	//  Default values for consumer configuration
 	// ------------------------------------------------------------------------
@@ -157,6 +164,8 @@ public SentinelSequenceNumber toSentinelSequenceNumber() {
 
 	public static final double DEFAULT_LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
 
+	public static final int DEFAULT_LIST_SHARDS_RETRIES = 10;
+
 	public static final int DEFAULT_SHARD_GETRECORDS_MAX = 10000;
 
 	public static final int DEFAULT_SHARD_GETRECORDS_RETRIES = 3;
@@ -179,6 +188,8 @@ public SentinelSequenceNumber toSentinelSequenceNumber() {
 
 	public static final long DEFAULT_SHARD_DISCOVERY_INTERVAL_MILLIS = 10000L;
 
+	public static final boolean DEFAULT_SHARD_USE_ADAPTIVE_READS = false;
+
 	/**
 	 * To avoid shard iterator expires in {@link ShardConsumer}s, the value for the configured
 	 * getRecords interval can not exceed 5 minutes, which is the expire time for retrieved iterators.
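
Both new options are plain entries in the consumer configuration. A brief fragment sketching how they could be set, assuming the usual Properties-based wiring of the Kinesis consumer (the values chosen here are placeholders):

    Properties consumerConfig = new Properties();
    // retry listShards up to 15 times instead of the default of 10
    consumerConfig.setProperty(ConsumerConfigConstants.LIST_SHARDS_RETRIES, "15");
    // enable adaptive reads, which tune the per-fetch record count against the
    // 2 MB/sec per-shard read limit (see the ShardConsumer changes below)
    consumerConfig.setProperty(ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS, "true");
    // consumerConfig is then passed to the Kinesis consumer/fetcher as usual
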
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
index 65de24c23d3..0981b76ce89 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -163,6 +163,9 @@
 	/** Reference to the first error thrown by any of the {@link ShardConsumer} threads. */
 	private final AtomicReference<Throwable> error;
 
+	/** The Kinesis proxy factory used to create proxy instances for shard discovery and the shard consumers. */
+	private final FlinkKinesisProxyFactory kinesisProxyFactory;
+
 	/** The Kinesis proxy that the fetcher will be using to discover new shards. */
 	private final KinesisProxyInterface kinesis;
 
@@ -179,6 +182,13 @@
 
 	private volatile boolean running = true;
 
+	/**
+	 * Factory to create Kinesis proxy instances used by a fetcher.
+	 */
+	public interface FlinkKinesisProxyFactory {
+		KinesisProxyInterface create(Properties configProps);
+	}
+
 	/**
 	 * Creates a Kinesis Data Fetcher.
 	 *
@@ -204,7 +214,7 @@ public KinesisDataFetcher(List<String> streams,
 			new AtomicReference<>(),
 			new ArrayList<>(),
 			createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
-			KinesisProxy.create(configProps));
+			KinesisProxy::create);
 	}
 
 	@VisibleForTesting
@@ -218,7 +228,7 @@ protected KinesisDataFetcher(List<String> streams,
 								AtomicReference<Throwable> error,
 								List<KinesisStreamShardState> subscribedShardsState,
 								HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
-								KinesisProxyInterface kinesis) {
+								FlinkKinesisProxyFactory kinesisProxyFactory) {
 		this.streams = checkNotNull(streams);
 		this.configProps = checkNotNull(configProps);
 		this.sourceContext = checkNotNull(sourceContext);
@@ -228,7 +238,8 @@ protected KinesisDataFetcher(List<String> streams,
 		this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
 		this.deserializationSchema = checkNotNull(deserializationSchema);
 		this.shardAssigner = checkNotNull(shardAssigner);
-		this.kinesis = checkNotNull(kinesis);
+		this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
+		this.kinesis = kinesisProxyFactory.create(configProps);
 
 		this.consumerMetricGroup = runtimeContext.getMetricGroup()
 			.addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);
@@ -241,6 +252,29 @@ protected KinesisDataFetcher(List<String> streams,
 			createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
 	}
 
+	/**
+	 * Create a new shard consumer.
+	 * Override this method to customize shard consumer behavior in subclasses.
+	 * @param subscribedShardStateIndex the state index of the shard this consumer is subscribed to
+	 * @param subscribedShard the shard this consumer is subscribed to
+	 * @param lastSequenceNum the sequence number in the shard to start consuming
+	 * @param shardMetricsReporter the reporter to report metrics to
+	 * @return shard consumer
+	 */
+	protected ShardConsumer createShardConsumer(
+		Integer subscribedShardStateIndex,
+		StreamShardHandle subscribedShard,
+		SequenceNumber lastSequenceNum,
+		ShardMetricsReporter shardMetricsReporter) {
+		return new ShardConsumer<>(
+			this,
+			subscribedShardStateIndex,
+			subscribedShard,
+			lastSequenceNum,
+			this.kinesisProxyFactory.create(configProps),
+			shardMetricsReporter);
+	}
+
 	/**
 	 * Starts the fetcher. After starting the fetcher, it can only
 	 * be stopped by calling {@link KinesisDataFetcher#shutdownFetcher()}.
@@ -297,8 +331,7 @@ public void runFetcher() throws Exception {
 					}
 
 				shardConsumersExecutor.submit(
-					new ShardConsumer<>(
-						this,
+					createShardConsumer(
 						seededStateIndex,
 						subscribedShardsState.get(seededStateIndex).getStreamShardHandle(),
 						subscribedShardsState.get(seededStateIndex).getLastProcessedSequenceNum(),
@@ -344,8 +377,7 @@ public void runFetcher() throws Exception {
 				}
 
 				shardConsumersExecutor.submit(
-					new ShardConsumer<>(
-						this,
+					createShardConsumer(
 						newStateIndex,
 						newShardState.getStreamShardHandle(),
 						newShardState.getLastProcessedSequenceNum(),
@@ -598,7 +630,14 @@ private static ShardMetricsReporter registerShardMetrics(MetricGroup metricGroup
 				shardState.getStreamShardHandle().getShard().getShardId());
 
 		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MILLIS_BEHIND_LATEST_GAUGE, shardMetrics::getMillisBehindLatest);
-
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MAX_RECORDS_PER_FETCH, shardMetrics::getMaxNumberOfRecordsPerFetch);
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_AGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfAggregatedRecords);
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_DEAGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfDeaggregatedRecords);
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.AVG_RECORD_SIZE_BYTES, shardMetrics::getAverageRecordSizeBytes);
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.BYTES_PER_READ, shardMetrics::getBytesPerRead);
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.RUNTIME_LOOP_NANOS, shardMetrics::getRunLoopTimeNanos);
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.LOOP_FREQUENCY_HZ, shardMetrics::getLoopFrequencyHz);
+		streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.SLEEP_TIME_MILLIS, shardMetrics::getSleepTimeMillis);
 		return shardMetrics;
 	}
 
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
index 30f00163ca6..5845eea8f88 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
@@ -24,7 +24,6 @@
 import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
-import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxy;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 
@@ -56,6 +55,10 @@
 
 	private static final Logger LOG = LoggerFactory.getLogger(ShardConsumer.class);
 
+	// AWS Kinesis has a read limit of 2 MB/sec per shard
+	// https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html
+	private static final long KINESIS_SHARD_BYTES_PER_SECOND_LIMIT = 2 * 1024L * 1024L;
+
 	private final KinesisDeserializationSchema<T> deserializer;
 
 	private final KinesisProxyInterface kinesis;
@@ -66,8 +69,9 @@
 
 	private final StreamShardHandle subscribedShard;
 
-	private final int maxNumberOfRecordsPerFetch;
+	private int maxNumberOfRecordsPerFetch;
 	private final long fetchIntervalMillis;
+	private final boolean useAdaptiveReads;
 
 	private final ShardMetricsReporter shardMetricsReporter;
 
@@ -82,28 +86,15 @@
 	 * @param subscribedShardStateIndex the state index of the shard this consumer is subscribed to
 	 * @param subscribedShard the shard this consumer is subscribed to
 	 * @param lastSequenceNum the sequence number in the shard to start consuming
+	 * @param kinesis the proxy instance to interact with Kinesis
 	 * @param shardMetricsReporter the reporter to report metrics to
 	 */
 	public ShardConsumer(KinesisDataFetcher<T> fetcherRef,
 						Integer subscribedShardStateIndex,
 						StreamShardHandle subscribedShard,
 						SequenceNumber lastSequenceNum,
+						KinesisProxyInterface kinesis,
 						ShardMetricsReporter shardMetricsReporter) {
-		this(fetcherRef,
-			subscribedShardStateIndex,
-			subscribedShard,
-			lastSequenceNum,
-			KinesisProxy.create(fetcherRef.getConsumerConfiguration()),
-			shardMetricsReporter);
-	}
-
-	/** This constructor is exposed for testing purposes. */
-	protected ShardConsumer(KinesisDataFetcher<T> fetcherRef,
-							Integer subscribedShardStateIndex,
-							StreamShardHandle subscribedShard,
-							SequenceNumber lastSequenceNum,
-							KinesisProxyInterface kinesis,
-							ShardMetricsReporter shardMetricsReporter) {
 		this.fetcherRef = checkNotNull(fetcherRef);
 		this.subscribedShardStateIndex = checkNotNull(subscribedShardStateIndex);
 		this.subscribedShard = checkNotNull(subscribedShard);
@@ -125,6 +116,9 @@ protected ShardConsumer(KinesisDataFetcher<T> fetcherRef,
 		this.fetchIntervalMillis = Long.valueOf(consumerConfig.getProperty(
 			ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS,
 			Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_INTERVAL_MILLIS)));
+		this.useAdaptiveReads = Boolean.valueOf(consumerConfig.getProperty(
+			ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS,
+			Boolean.toString(ConsumerConfigConstants.DEFAULT_SHARD_USE_ADAPTIVE_READS)));
 
 		if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get())) {
 			String timestamp = consumerConfig.getProperty(ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP);
@@ -144,62 +138,73 @@ protected ShardConsumer(KinesisDataFetcher<T> fetcherRef,
 		}
 	}
 
-	@SuppressWarnings("unchecked")
-	@Override
-	public void run() {
+	/**
+	 * Finds the initial shard iterator to start getting records from.
+	 * @return the initial shard iterator, or null if the shard is already closed or fully consumed
+	 * @throws Exception if the shard iterator cannot be obtained from Kinesis
+	 */
+	protected String getInitialShardIterator() throws Exception {
 		String nextShardItr;
 
-		try {
-			// before infinitely looping, we set the initial nextShardItr appropriately
+		// before infinitely looping, we set the initial nextShardItr appropriately
 
-			if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
-				// if the shard is already closed, there will be no latest next record to get for this shard
-				if (subscribedShard.isClosed()) {
-					nextShardItr = null;
-				} else {
-					nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.LATEST.toString(), null);
-				}
-			} else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
-				nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.TRIM_HORIZON.toString(), null);
-			} else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
+		if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
+			// if the shard is already closed, there will be no latest next record to get for this shard
+			if (subscribedShard.isClosed()) {
 				nextShardItr = null;
-			} else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get())) {
-				nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.AT_TIMESTAMP.toString(), initTimestamp);
 			} else {
-				// we will be starting from an actual sequence number (due to restore from failure).
-				// if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
-				// from the last aggregated record; otherwise, we can simply start iterating from the record right after.
-
-				if (lastSequenceNum.isAggregated()) {
-					String itrForLastAggregatedRecord =
-						kinesis.getShardIterator(subscribedShard, ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());
-
-					// get only the last aggregated record
-					GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);
-
-					List<UserRecord> fetchedRecords = deaggregateRecords(
-						getRecordsResult.getRecords(),
-						subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
-						subscribedShard.getShard().getHashKeyRange().getEndingHashKey());
-
-					long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
-					for (UserRecord record : fetchedRecords) {
-						// we have found a dangling sub-record if it has a larger subsequence number
-						// than our last sequence number; if so, collect the record and update state
-						if (record.getSubSequenceNumber() > lastSubSequenceNum) {
-							deserializeRecordForCollectionAndUpdateState(record);
-						}
+				nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.LATEST.toString(), null);
+			}
+		} else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
+			nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.TRIM_HORIZON.toString(), null);
+		} else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
+			nextShardItr = null;
+		} else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get())) {
+			nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.AT_TIMESTAMP.toString(), initTimestamp);
+		} else {
+			// we will be starting from an actual sequence number (due to restore from failure).
+			// if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
+			// from the last aggregated record; otherwise, we can simply start iterating from the record right after.
+
+			if (lastSequenceNum.isAggregated()) {
+				String itrForLastAggregatedRecord =
+					kinesis.getShardIterator(subscribedShard, ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());
+
+				// get only the last aggregated record
+				GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);
+
+				List<UserRecord> fetchedRecords = deaggregateRecords(
+					getRecordsResult.getRecords(),
+					subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
+					subscribedShard.getShard().getHashKeyRange().getEndingHashKey());
+
+				long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
+				for (UserRecord record : fetchedRecords) {
+					// we have found a dangling sub-record if it has a larger subsequence number
+					// than our last sequence number; if so, collect the record and update state
+					if (record.getSubSequenceNumber() > lastSubSequenceNum) {
+						deserializeRecordForCollectionAndUpdateState(record);
 					}
-
-					// set the nextShardItr so we can continue iterating in the next while loop
-					nextShardItr = getRecordsResult.getNextShardIterator();
-				} else {
-					// the last record was non-aggregated, so we can simply start from the next record
-					nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());
 				}
+
+				// set the nextShardItr so we can continue iterating in the next while loop
+				nextShardItr = getRecordsResult.getNextShardIterator();
+			} else {
+				// the last record was non-aggregated, so we can simply start from the next record
+				nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());
 			}
+		}
+		return nextShardItr;
+	}
+
+	@SuppressWarnings("unchecked")
+	@Override
+	public void run() {
+		try {
+			String nextShardItr = getInitialShardIterator();
+
+			long processingStartTimeNanos = System.nanoTime();
 
-			long lastTimeNanos = 0;
 			while (isRunning()) {
 				if (nextShardItr == null) {
 					fetcherRef.updateState(subscribedShardStateIndex, SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());
@@ -207,28 +212,35 @@ public void run() {
 					// we can close this consumer thread once we've reached the end of the subscribed shard
 					break;
 				} else {
-					if (fetchIntervalMillis != 0) {
-						long elapsedTimeNanos = System.nanoTime() - lastTimeNanos;
-						long sleepTimeMillis = fetchIntervalMillis - (elapsedTimeNanos / 1_000_000);
-						if (sleepTimeMillis > 0) {
-							Thread.sleep(sleepTimeMillis);
-						}
-						lastTimeNanos = System.nanoTime();
-					}
-
+					shardMetricsReporter.setMaxNumberOfRecordsPerFetch(maxNumberOfRecordsPerFetch);
 					GetRecordsResult getRecordsResult = getRecords(nextShardItr, maxNumberOfRecordsPerFetch);
 
+					List<Record> aggregatedRecords = getRecordsResult.getRecords();
+					int numberOfAggregatedRecords = aggregatedRecords.size();
+					shardMetricsReporter.setNumberOfAggregatedRecords(numberOfAggregatedRecords);
+
 					// each of the Kinesis records may be aggregated, so we must deaggregate them before proceeding
 					List<UserRecord> fetchedRecords = deaggregateRecords(
-						getRecordsResult.getRecords(),
+						aggregatedRecords,
 						subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
 						subscribedShard.getShard().getHashKeyRange().getEndingHashKey());
 
+					long recordBatchSizeBytes = 0L;
 					for (UserRecord record : fetchedRecords) {
+						recordBatchSizeBytes += record.getData().remaining();
 						deserializeRecordForCollectionAndUpdateState(record);
 					}
 
+					int numberOfDeaggregatedRecords = fetchedRecords.size();
+					shardMetricsReporter.setNumberOfDeaggregatedRecords(numberOfDeaggregatedRecords);
+
 					nextShardItr = getRecordsResult.getNextShardIterator();
+
+					long adjustmentEndTimeNanos = adjustRunLoopFrequency(processingStartTimeNanos, System.nanoTime());
+					long runLoopTimeNanos = adjustmentEndTimeNanos - processingStartTimeNanos;
+					maxNumberOfRecordsPerFetch = adaptRecordsToRead(runLoopTimeNanos, fetchedRecords.size(), recordBatchSizeBytes, maxNumberOfRecordsPerFetch);
+					shardMetricsReporter.setRunLoopTimeNanos(runLoopTimeNanos);
+					processingStartTimeNanos = adjustmentEndTimeNanos; // for next time through the loop
 				}
 			}
 		} catch (Throwable t) {
@@ -236,6 +248,56 @@ public void run() {
 		}
 	}
 
+	/**
+	 * Adjusts loop timing to match target frequency if specified.
+	 * @param processingStartTimeNanos The start time of the run loop "work"
+	 * @param processingEndTimeNanos The end time of the run loop "work"
+	 * @return The System.nanoTime() after the sleep (if any)
+	 * @throws InterruptedException if the thread is interrupted while sleeping
+	 */
+	protected long adjustRunLoopFrequency(long processingStartTimeNanos, long processingEndTimeNanos)
+		throws InterruptedException {
+		long endTimeNanos = processingEndTimeNanos;
+		if (fetchIntervalMillis != 0) {
+			long processingTimeNanos = processingEndTimeNanos - processingStartTimeNanos;
+			long sleepTimeMillis = fetchIntervalMillis - (processingTimeNanos / 1_000_000);
+			if (sleepTimeMillis > 0) {
+				Thread.sleep(sleepTimeMillis);
+				endTimeNanos = System.nanoTime();
+				shardMetricsReporter.setSleepTimeMillis(sleepTimeMillis);
+			}
+		}
+		return endTimeNanos;
+	}
+
+	/**
+	 * Calculates how many records to read each time through the loop based on a target throughput
+	 * and the measured frequency of the loop.
+	 * @param runLoopTimeNanos The total time of one pass through the loop
+	 * @param numRecords The number of records of the last read operation
+	 * @param recordBatchSizeBytes The total size in bytes of the last read operation
+	 * @param maxNumberOfRecordsPerFetch The current maxNumberOfRecordsPerFetch
+	 * @return The adjusted maxNumberOfRecordsPerFetch for the next read operation
+	 */
+	private int adaptRecordsToRead(long runLoopTimeNanos, int numRecords, long recordBatchSizeBytes,
+			int maxNumberOfRecordsPerFetch) {
+		if (useAdaptiveReads && numRecords != 0 && runLoopTimeNanos != 0) {
+			long averageRecordSizeBytes = recordBatchSizeBytes / numRecords;
+			// Adjust number of records to fetch from the shard depending on current average record size
+			// to stay within the 2 MB/sec per-shard read limit
+			double loopFrequencyHz = 1000000000.0d / runLoopTimeNanos;
+			double bytesPerRead = KINESIS_SHARD_BYTES_PER_SECOND_LIMIT / loopFrequencyHz;
+			maxNumberOfRecordsPerFetch = (int) (bytesPerRead / averageRecordSizeBytes);
+			// Clamp the value to at least 1 and at most ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_MAX (10000)
+			maxNumberOfRecordsPerFetch = Math.max(1, Math.min(maxNumberOfRecordsPerFetch, ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_MAX));
+
+			// Set metrics
+			shardMetricsReporter.setAverageRecordSizeBytes(averageRecordSizeBytes);
+			shardMetricsReporter.setLoopFrequencyHz(loopFrequencyHz);
+			shardMetricsReporter.setBytesPerRead(bytesPerRead);
+		}
+		return maxNumberOfRecordsPerFetch;
+	}
+
 	/**
 	 * The loop in run() checks this before fetching next batch of records. Since this runnable will be executed
 	 * by the ExecutorService {@link KinesisDataFetcher#shardConsumersExecutor}, the only way to close down this thread
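
The new run loop above splits the per-iteration work into two helpers: adjustRunLoopFrequency sleeps just long enough to hit the configured fetch interval, and adaptRecordsToRead sizes the next GetRecords call so that, at the measured loop frequency, the consumer stays under the 2 MB/sec per-shard read limit. A minimal standalone sketch of the fetch-size calculation, with the constants inlined and illustrative numbers (class and method names below are not the connector's):

    public final class AdaptiveFetchSizeExample {

        // 2 MB/sec per-shard read limit documented by AWS for GetRecords
        private static final long SHARD_BYTES_PER_SECOND_LIMIT = 2 * 1024L * 1024L;
        // upper bound on records per GetRecords call (10000, mirroring DEFAULT_SHARD_GETRECORDS_MAX)
        private static final int MAX_RECORDS_PER_GET = 10_000;

        /** Sizes the next GetRecords call so one loop's worth of data stays under the shard limit. */
        static int adaptRecordsToRead(long runLoopTimeNanos, int numRecords, long batchSizeBytes, int current) {
            if (numRecords == 0 || runLoopTimeNanos == 0) {
                return current;
            }
            long averageRecordSizeBytes = batchSizeBytes / numRecords;
            double loopFrequencyHz = 1_000_000_000.0d / runLoopTimeNanos;          // loops per second
            double bytesPerRead = SHARD_BYTES_PER_SECOND_LIMIT / loopFrequencyHz;  // byte budget per loop
            int records = (int) (bytesPerRead / averageRecordSizeBytes);
            return Math.max(1, Math.min(records, MAX_RECORDS_PER_GET));            // clamp to [1, 10000]
        }

        public static void main(String[] args) {
            // Example: the last loop took 200 ms and fetched 10 records of 10 KB each.
            long runLoopTimeNanos = 200_000_000L;
            long batchSizeBytes = 10 * 10 * 1024L;
            int next = adaptRecordsToRead(runLoopTimeNanos, 10, batchSizeBytes, 10_000);
            // 5 loops/sec -> ~400 KB budget per loop -> ~40 records of 10 KB each
            System.out.println(next); // prints 40
        }
    }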
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
index 1b83f161d00..e850d25b6c6 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
@@ -34,4 +34,13 @@
 	public static final String SHARD_METRICS_GROUP = "shardId";
 
 	public static final String MILLIS_BEHIND_LATEST_GAUGE = "millisBehindLatest";
+	public static final String SLEEP_TIME_MILLIS = "sleepTimeMillis";
+	public static final String MAX_RECORDS_PER_FETCH = "maxNumberOfRecordsPerFetch";
+	public static final String NUM_AGGREGATED_RECORDS_PER_FETCH = "numberOfAggregatedRecordsPerFetch";
+	public static final String NUM_DEAGGREGATED_RECORDS_PER_FETCH = "numberOfDeaggregatedRecordsPerFetch";
+	public static final String AVG_RECORD_SIZE_BYTES = "averageRecordSizeBytes";
+	public static final String RUNTIME_LOOP_NANOS = "runLoopTimeNanos";
+	public static final String LOOP_FREQUENCY_HZ = "loopFrequencyHz";
+	public static final String BYTES_PER_READ = "bytesRequestedPerFetch";
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java
index 2b6a491d247..4a27b9cdcb4 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardMetricsReporter.java
@@ -28,6 +28,14 @@
 public class ShardMetricsReporter {
 
 	private volatile long millisBehindLatest = -1;
+	private volatile double loopFrequencyHz = 0.0;
+	private volatile double bytesPerRead = 0.0;
+	private volatile long runLoopTimeNanos = 0L;
+	private volatile long averageRecordSizeBytes = 0L;
+	private volatile long sleepTimeMillis = 0L;
+	private volatile int numberOfAggregatedRecords = 0;
+	private volatile int numberOfDeaggregatedRecords = 0;
+	private volatile int maxNumberOfRecordsPerFetch = 0;
 
 	public long getMillisBehindLatest() {
 		return millisBehindLatest;
@@ -37,4 +45,68 @@ public void setMillisBehindLatest(long millisBehindLatest) {
 		this.millisBehindLatest = millisBehindLatest;
 	}
 
+	public double getLoopFrequencyHz() {
+		return loopFrequencyHz;
+	}
+
+	public void setLoopFrequencyHz(double loopFrequencyHz) {
+		this.loopFrequencyHz = loopFrequencyHz;
+	}
+
+	public double getBytesPerRead() {
+		return bytesPerRead;
+	}
+
+	public void setBytesPerRead(double bytesPerRead) {
+		this.bytesPerRead = bytesPerRead;
+	}
+
+	public long getRunLoopTimeNanos() {
+		return runLoopTimeNanos;
+	}
+
+	public void setRunLoopTimeNanos(long runLoopTimeNanos) {
+		this.runLoopTimeNanos = runLoopTimeNanos;
+	}
+
+	public long getAverageRecordSizeBytes() {
+		return averageRecordSizeBytes;
+	}
+
+	public void setAverageRecordSizeBytes(long averageRecordSizeBytes) {
+		this.averageRecordSizeBytes = averageRecordSizeBytes;
+	}
+
+	public long getSleepTimeMillis() {
+		return sleepTimeMillis;
+	}
+
+	public void setSleepTimeMillis(long sleepTimeMillis) {
+		this.sleepTimeMillis = sleepTimeMillis;
+	}
+
+	public int getNumberOfAggregatedRecords() {
+		return numberOfAggregatedRecords;
+	}
+
+	public void setNumberOfAggregatedRecords(int numberOfAggregatedRecords) {
+		this.numberOfAggregatedRecords = numberOfAggregatedRecords;
+	}
+
+	public int getNumberOfDeaggregatedRecords() {
+		return numberOfDeaggregatedRecords;
+	}
+
+	public void setNumberOfDeaggregatedRecords(int numberOfDeaggregatedRecords) {
+		this.numberOfDeaggregatedRecords = numberOfDeaggregatedRecords;
+	}
+
+	public int getMaxNumberOfRecordsPerFetch() {
+		return maxNumberOfRecordsPerFetch;
+	}
+
+	public void setMaxNumberOfRecordsPerFetch(int maxNumberOfRecordsPerFetch) {
+		this.maxNumberOfRecordsPerFetch = maxNumberOfRecordsPerFetch;
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
index 7e6a3604414..262181ae3bc 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
@@ -91,6 +91,9 @@
 	/** Exponential backoff power constant for the list shards operation. */
 	private final double listShardsExpConstant;
 
+	/** Maximum retry attempts for the list shards operation. */
+	private final int listShardsMaxRetries;
+
 	// ------------------------------------------------------------------------
 	//  getRecords() related performance settings
 	// ------------------------------------------------------------------------
@@ -104,8 +107,8 @@
 	/** Exponential backoff power constant for the get records operation. */
 	private final double getRecordsExpConstant;
 
-	/** Maximum attempts for the get records operation. */
-	private final int getRecordsMaxAttempts;
+	/** Maximum retry attempts for the get records operation. */
+	private final int getRecordsMaxRetries;
 
 	// ------------------------------------------------------------------------
 	//  getShardIterator() related performance settings
@@ -120,8 +123,8 @@
 	/** Exponential backoff power constant for the get shard iterator operation. */
 	private final double getShardIteratorExpConstant;
 
-	/** Maximum attempts for the get shard iterator operation. */
-	private final int getShardIteratorMaxAttempts;
+	/** Maximum retry attempts for the get shard iterator operation. */
+	private final int getShardIteratorMaxRetries;
 
 	/**
 	 * Create a new KinesisProxy based on the supplied configuration properties.
@@ -146,6 +149,10 @@ protected KinesisProxy(Properties configProps) {
 			configProps.getProperty(
 				ConsumerConfigConstants.LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT,
 				Double.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT)));
+		this.listShardsMaxRetries = Integer.valueOf(
+			configProps.getProperty(
+				ConsumerConfigConstants.LIST_SHARDS_RETRIES,
+				Long.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_RETRIES)));
 
 		this.getRecordsBaseBackoffMillis = Long.valueOf(
 			configProps.getProperty(
@@ -159,7 +166,7 @@ protected KinesisProxy(Properties configProps) {
 			configProps.getProperty(
 				ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT,
 				Double.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT)));
-		this.getRecordsMaxAttempts = Integer.valueOf(
+		this.getRecordsMaxRetries = Integer.valueOf(
 			configProps.getProperty(
 				ConsumerConfigConstants.SHARD_GETRECORDS_RETRIES,
 				Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_RETRIES)));
@@ -176,7 +183,7 @@ protected KinesisProxy(Properties configProps) {
 			configProps.getProperty(
 				ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT,
 				Double.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT)));
-		this.getShardIteratorMaxAttempts = Integer.valueOf(
+		this.getShardIteratorMaxRetries = Integer.valueOf(
 			configProps.getProperty(
 				ConsumerConfigConstants.SHARD_GETITERATOR_RETRIES,
 				Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_RETRIES)));
@@ -217,14 +224,14 @@ public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) th
 
 		GetRecordsResult getRecordsResult = null;
 
-		int attempt = 0;
-		while (attempt <= getRecordsMaxAttempts && getRecordsResult == null) {
+		int retryCount = 0;
+		while (retryCount <= getRecordsMaxRetries && getRecordsResult == null) {
 			try {
 				getRecordsResult = kinesisClient.getRecords(getRecordsRequest);
 			} catch (SdkClientException ex) {
 				if (isRecoverableSdkClientException(ex)) {
 					long backoffMillis = fullJitterBackoff(
-						getRecordsBaseBackoffMillis, getRecordsMaxBackoffMillis, getRecordsExpConstant, attempt++);
+						getRecordsBaseBackoffMillis, getRecordsMaxBackoffMillis, getRecordsExpConstant, retryCount++);
 					LOG.warn("Got recoverable SdkClientException. Backing off for "
 						+ backoffMillis + " millis (" + ex.getMessage() + ")");
 					Thread.sleep(backoffMillis);
@@ -235,7 +242,7 @@ public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) th
 		}
 
 		if (getRecordsResult == null) {
-			throw new RuntimeException("Rate Exceeded for getRecords operation - all " + getRecordsMaxAttempts +
+			throw new RuntimeException("Rate Exceeded for getRecords operation - all " + getRecordsMaxRetries +
 				" retry attempts returned ProvisionedThroughputExceededException.");
 		}
 
@@ -292,14 +299,14 @@ public String getShardIterator(StreamShardHandle shard, String shardIteratorType
 	private String getShardIterator(GetShardIteratorRequest getShardIteratorRequest) throws InterruptedException {
 		GetShardIteratorResult getShardIteratorResult = null;
 
-		int attempt = 0;
-		while (attempt <= getShardIteratorMaxAttempts && getShardIteratorResult == null) {
+		int retryCount = 0;
+		while (retryCount <= getShardIteratorMaxRetries && getShardIteratorResult == null) {
 			try {
 					getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
 			} catch (AmazonServiceException ex) {
 				if (isRecoverableException(ex)) {
 					long backoffMillis = fullJitterBackoff(
-						getShardIteratorBaseBackoffMillis, getShardIteratorMaxBackoffMillis, getShardIteratorExpConstant, attempt++);
+						getShardIteratorBaseBackoffMillis, getShardIteratorMaxBackoffMillis, getShardIteratorExpConstant, retryCount++);
 					LOG.warn("Got recoverable AmazonServiceException. Backing off for "
 						+ backoffMillis + " millis (" + ex.getErrorMessage() + ")");
 					Thread.sleep(backoffMillis);
@@ -310,7 +317,7 @@ private String getShardIterator(GetShardIteratorRequest getShardIteratorRequest)
 		}
 
 		if (getShardIteratorResult == null) {
-			throw new RuntimeException("Rate Exceeded for getShardIterator operation - all " + getShardIteratorMaxAttempts +
+			throw new RuntimeException("Rate Exceeded for getShardIterator operation - all " + getShardIteratorMaxRetries +
 				" retry attempts returned ProvisionedThroughputExceededException.");
 		}
 		return getShardIteratorResult.getShardIterator();
@@ -406,16 +413,16 @@ private ListShardsResult listShards(String streamName, @Nullable String startSha
 		ListShardsResult listShardsResults = null;
 
 		// Call ListShards, with full-jitter backoff (if we get LimitExceededException).
-		int attemptCount = 0;
+		int retryCount = 0;
 		// List Shards returns just the first 1000 shard entries. Make sure that all entries
 		// are taken up.
-		while (listShardsResults == null) { // retry until we get a result
+		while (retryCount <= listShardsMaxRetries && listShardsResults == null) { // retry until we get a result
 			try {
 
 				listShardsResults = kinesisClient.listShards(listShardsRequest);
 			} catch (LimitExceededException le) {
 				long backoffMillis = fullJitterBackoff(
-						listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, attemptCount++);
+						listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
 					LOG.warn("Got LimitExceededException when listing shards from stream " + streamName
 									+ ". Backing off for " + backoffMillis + " millis.");
 				Thread.sleep(backoffMillis);
@@ -433,6 +440,18 @@ private ListShardsResult listShards(String streamName, @Nullable String startSha
 			} catch (ExpiredNextTokenException expiredToken) {
 				LOG.warn("List Shards has an expired token. Reusing the previous state.");
 				break;
+			} catch (SdkClientException ex) {
+				if (retryCount < listShardsMaxRetries && isRecoverableSdkClientException(ex)) {
+					long backoffMillis = fullJitterBackoff(
+						listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
+					LOG.warn("Got SdkClientException when listing shards from stream {}. Backing off for {} millis.",
+						streamName, backoffMillis);
+					Thread.sleep(backoffMillis);
+				} else {
+					// propagate if retries exceeded or not recoverable
+					// (otherwise would return null result and keep trying forever)
+					throw ex;
+				}
 			}
 		}
 		// Kinesalite (mock implementation of Kinesis) does not correctly exclude shards before
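
The renamed retryCount/maxRetries fields above keep the existing full-jitter backoff scheme: each recoverable failure sleeps for a random delay between 0 and an exponentially growing cap, and listShards now gives up once the retry budget is exhausted instead of looping forever. A minimal sketch of that retry pattern, independent of the AWS client; the fullJitterBackoff body below assumes the standard full-jitter formula rather than copying the connector's private helper:

    import java.util.Random;
    import java.util.concurrent.Callable;

    final class RetryWithFullJitter {

        private static final Random RANDOM = new Random();

        /** Full jitter: a random delay in [0, min(maxMillis, baseMillis * power^retry)). */
        static long fullJitterBackoff(long baseMillis, long maxMillis, double power, int retry) {
            long cap = (long) Math.min(maxMillis, baseMillis * Math.pow(power, retry));
            return (long) (RANDOM.nextDouble() * cap);
        }

        /** Calls the given operation, retrying up to maxRetries times with jittered backoff in between. */
        static <T> T callWithRetries(Callable<T> operation, int maxRetries) throws Exception {
            for (int retryCount = 0; ; retryCount++) {
                try {
                    return operation.call();
                } catch (Exception recoverable) {
                    if (retryCount >= maxRetries) {
                        throw recoverable; // retry budget exhausted, propagate
                    }
                    Thread.sleep(fullJitterBackoff(100L, 10_000L, 1.5, retryCount));
                }
            }
        }
    }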
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
index 9e5c6cbe450..e25a6015771 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
@@ -36,14 +36,14 @@
 import com.amazonaws.regions.Regions;
 import com.amazonaws.services.kinesis.AmazonKinesis;
 import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
+import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
+import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.deser.BeanDeserializerFactory;
 import com.fasterxml.jackson.databind.deser.BeanDeserializerModifier;
 import com.fasterxml.jackson.databind.deser.DefaultDeserializationContext;
 import com.fasterxml.jackson.databind.deser.DeserializerFactory;
-import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
-import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
 
 import java.io.IOException;
 import java.util.HashMap;
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
index a4d60ed496d..75c84cdca5a 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
@@ -87,6 +87,12 @@ public static void validateConsumerConfiguration(Properties config) {
 
 		validateAwsConfiguration(config);
 
+		if (!(config.containsKey(AWSConfigConstants.AWS_REGION) ^ config.containsKey(ConsumerConfigConstants.AWS_ENDPOINT))) {
+			// per validation in AwsClientBuilder
+			throw new IllegalArgumentException(String.format("For FlinkKinesisConsumer either AWS region ('%s') or AWS endpoint ('%s') must be set in the config.",
+					AWSConfigConstants.AWS_REGION, AWSConfigConstants.AWS_ENDPOINT));
+		}
+
 		if (config.containsKey(ConsumerConfigConstants.STREAM_INITIAL_POSITION)) {
 			String initPosType = config.getProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION);
 
@@ -213,6 +219,11 @@ public static KinesisProducerConfiguration getValidatedProducerConfiguration(Pro
 
 		validateAwsConfiguration(config);
 
+		if (!config.containsKey(AWSConfigConstants.AWS_REGION)) {
+			// per requirement in Amazon Kinesis Producer Library
+			throw new IllegalArgumentException(String.format("For FlinkKinesisProducer AWS region ('%s') must be set in the config.", AWSConfigConstants.AWS_REGION));
+		}
+
 		KinesisProducerConfiguration kpc = KinesisProducerConfiguration.fromProperties(config);
 		kpc.setRegion(config.getProperty(AWSConfigConstants.AWS_REGION));
 
@@ -266,12 +277,6 @@ public static void validateAwsConfiguration(Properties config) {
 			}
 		}
 
-		if (!(config.containsKey(AWSConfigConstants.AWS_REGION) ^ config.containsKey(ConsumerConfigConstants.AWS_ENDPOINT))) {
-			// per validation in AwsClientBuilder
-			throw new IllegalArgumentException(String.format("Either AWS region ('%s') or AWS endpoint ('%s') must be set in the config.",
-				AWSConfigConstants.AWS_REGION, AWSConfigConstants.AWS_REGION));
-		}
-
 		if (config.containsKey(AWSConfigConstants.AWS_REGION)) {
 			// specified AWS Region name must be recognizable
 			if (!AWSUtil.isValidRegion(config.getProperty(AWSConfigConstants.AWS_REGION))) {
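
The validation change above moves the region/endpoint check out of the shared AWS validation and into the consumer path, and adds a producer-only requirement: the consumer must set exactly one of region or endpoint, while the Kinesis Producer Library always needs a region. A small sketch of those two rules on a plain Properties object (the property keys below are placeholders for AWSConfigConstants.AWS_REGION and AWS_ENDPOINT):

    import java.util.Properties;

    final class KinesisConfigChecks {

        // placeholder keys; the connector reads AWSConfigConstants.AWS_REGION / AWS_ENDPOINT
        private static final String AWS_REGION = "aws.region";
        private static final String AWS_ENDPOINT = "aws.endpoint";

        /** Consumer rule: exactly one of region or endpoint must be configured. */
        static void validateConsumerConfig(Properties config) {
            if (!(config.containsKey(AWS_REGION) ^ config.containsKey(AWS_ENDPOINT))) {
                throw new IllegalArgumentException(
                    "Either the AWS region or the AWS endpoint must be set in the config (but not both).");
            }
        }

        /** Producer rule: the Kinesis Producer Library always requires a region. */
        static void validateProducerConfig(Properties config) {
            if (!config.containsKey(AWS_REGION)) {
                throw new IllegalArgumentException("The AWS region must be set in the config.");
            }
        }

        public static void main(String[] args) {
            Properties consumerConfig = new Properties();
            consumerConfig.setProperty(AWS_ENDPOINT, "http://localhost:4567"); // e.g. a Kinesalite endpoint
            validateConsumerConfig(consumerConfig); // passes: endpoint only, no region
        }
    }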
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/TimeoutLatch.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/TimeoutLatch.java
index 4dcab33bd1a..49a9ed71ae0 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/TimeoutLatch.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/TimeoutLatch.java
@@ -20,6 +20,9 @@
 
 import org.apache.flink.annotation.Internal;
 
+/**
+ * Internal use.
+ */
 @Internal
 public class TimeoutLatch {
 
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
index 4478b2ff8e5..dbc71182b04 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
@@ -168,6 +168,51 @@ public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithUnexpectedExpired
 			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
 	}
 
+	@Test
+	public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithAdaptiveReads() {
+		Properties consumerProperties = new Properties();
+		consumerProperties.put("flink.shard.adaptivereads", "true");
+
+		StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);
+
+		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
+		subscribedShardsStateUnderTest.add(
+			new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
+				fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));
+
+		TestSourceContext<String> sourceContext = new TestSourceContext<>();
+
+		TestableKinesisDataFetcher<String> fetcher =
+			new TestableKinesisDataFetcher<>(
+				Collections.singletonList("fakeStream"),
+				sourceContext,
+				consumerProperties,
+				new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
+				10,
+				2,
+				new AtomicReference<>(),
+				subscribedShardsStateUnderTest,
+				KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
+				Mockito.mock(KinesisProxyInterface.class));
+
+		new ShardConsumer<>(
+			fetcher,
+			0,
+			subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
+			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
+			// Initial number of records to fetch --> 10
+			FakeKinesisBehavioursFactory.initialNumOfRecordsAfterNumOfGetRecordsCallsWithAdaptiveReads(10, 2, 500L),
+			new ShardMetricsReporter()).run();
+
+		// Average record size of the first batch --> (10 records * 10 KB) / 10 = 10 KB
+		// Records fetched in the second batch --> 2 MB/sec / (10 KB * 5 reads per sec) = ~40
+		// Total number of records = 10 + 40 = 50
+		assertEquals(50, sourceContext.getCollectedOutputs().size());
+		assertEquals(
+			SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
+			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
+	}
+
 	private static StreamShardHandle getMockStreamShard(String streamName, int shardId) {
 		return new StreamShardHandle(
 			streamName,
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
index 775ae4b3352..edf6ceb0d57 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
@@ -27,16 +27,24 @@
 import com.amazonaws.AmazonServiceException.ErrorType;
 import com.amazonaws.ClientConfiguration;
 import com.amazonaws.ClientConfigurationFactory;
+import com.amazonaws.SdkClientException;
 import com.amazonaws.services.kinesis.AmazonKinesis;
+import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.model.AmazonKinesisException;
 import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
 import com.amazonaws.services.kinesis.model.ListShardsRequest;
 import com.amazonaws.services.kinesis.model.ListShardsResult;
 import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
 import com.amazonaws.services.kinesis.model.Shard;
+import org.apache.commons.lang3.mutable.MutableInt;
 import org.hamcrest.Description;
 import org.hamcrest.TypeSafeDiagnosingMatcher;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 import org.powermock.reflect.Whitebox;
 
 import java.util.ArrayList;
@@ -54,6 +62,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -91,6 +100,37 @@ public void testIsRecoverableExceptionWithNullErrorType() {
 		assertFalse(KinesisProxy.isRecoverableException(ex));
 	}
 
+	@Test
+	public void testGetRecordsRetry() throws Exception {
+		Properties kinesisConsumerConfig = new Properties();
+		kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_REGION, "us-east-1");
+
+		final GetRecordsResult expectedResult = new GetRecordsResult();
+		MutableInt retries = new MutableInt();
+		final Throwable[] retriableExceptions = new Throwable[] {
+			new AmazonKinesisException("mock"),
+		};
+
+		AmazonKinesisClient mockClient = mock(AmazonKinesisClient.class);
+		Mockito.when(mockClient.getRecords(any())).thenAnswer(new Answer<GetRecordsResult>() {
+			@Override
+			public GetRecordsResult answer(InvocationOnMock invocation) throws Throwable {
+				if (retries.intValue() < retriableExceptions.length) {
+					retries.increment();
+					throw retriableExceptions[retries.intValue() - 1];
+				}
+				return expectedResult;
+			}
+		});
+
+		KinesisProxy kinesisProxy = new KinesisProxy(kinesisConsumerConfig);
+		Whitebox.getField(KinesisProxy.class, "kinesisClient").set(kinesisProxy, mockClient);
+
+		GetRecordsResult result = kinesisProxy.getRecords("fakeShardIterator", 1);
+		assertEquals(retriableExceptions.length, retries.intValue());
+		assertEquals(expectedResult, result);
+	}
+
 	@Test
 	public void testGetShardList() throws Exception {
 		List<String> shardIds =
@@ -151,6 +191,60 @@ public void testGetShardList() throws Exception {
 						expectedStreamShard.toArray(new StreamShardHandle[actualShardList.size()])));
 	}
 
+	@Test
+	public void testGetShardListRetry() throws Exception {
+		Properties kinesisConsumerConfig = new Properties();
+		kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_REGION, "us-east-1");
+
+		Shard shard = new Shard();
+		shard.setShardId("fake-shard-000000000000");
+		final ListShardsResult expectedResult = new ListShardsResult();
+		expectedResult.withShards(shard);
+
+		MutableInt exceptionCount = new MutableInt();
+		final Throwable[] retriableExceptions = new Throwable[]{
+			new AmazonKinesisException("attempt1"),
+			new AmazonKinesisException("attempt2"),
+		};
+
+		AmazonKinesisClient mockClient = mock(AmazonKinesisClient.class);
+		Mockito.when(mockClient.listShards(any())).thenAnswer(new Answer<ListShardsResult>() {
+
+			@Override
+			public ListShardsResult answer(InvocationOnMock invocation) throws Throwable {
+				if (exceptionCount.intValue() < retriableExceptions.length) {
+					exceptionCount.increment();
+					throw retriableExceptions[exceptionCount.intValue() - 1];
+				}
+				return expectedResult;
+			}
+		});
+
+		KinesisProxy kinesisProxy = new KinesisProxy(kinesisConsumerConfig);
+		Whitebox.getField(KinesisProxy.class, "kinesisClient").set(kinesisProxy, mockClient);
+
+		HashMap<String, String> streamNames = new HashMap<>();
+		streamNames.put("fake-stream", null);
+		GetShardListResult result = kinesisProxy.getShardList(streamNames);
+		assertEquals(retriableExceptions.length, exceptionCount.intValue());
+		assertEquals(true, result.hasRetrievedShards());
+		assertEquals(shard.getShardId(), result.getLastSeenShardOfStream("fake-stream").getShard().getShardId());
+
+		// test max attempt count exceeded
+		int maxRetries = 1;
+		exceptionCount.setValue(0);
+		kinesisConsumerConfig.setProperty(ConsumerConfigConstants.LIST_SHARDS_RETRIES, String.valueOf(maxRetries));
+		kinesisProxy = new KinesisProxy(kinesisConsumerConfig);
+		Whitebox.getField(KinesisProxy.class, "kinesisClient").set(kinesisProxy, mockClient);
+		try {
+			kinesisProxy.getShardList(streamNames);
+			Assert.fail("exception expected");
+		} catch (SdkClientException ex) {
+			assertEquals(retriableExceptions[maxRetries], ex);
+		}
+		assertEquals(maxRetries + 1, exceptionCount.intValue());
+	}
+
 	@Test
 	public void testCustomConfigurationOverride() {
 		Properties configProps = new Properties();
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
index e4036233720..eb3415572c0 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
@@ -18,6 +18,7 @@
 package org.apache.flink.streaming.connectors.kinesis.testutils;
 
 import org.apache.flink.configuration.ConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.proxy.GetShardListResult;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
@@ -93,6 +94,14 @@ public static KinesisProxyInterface totalNumOfRecordsAfterNumOfGetRecordsCallsWi
 			numOfRecords, numOfGetRecordsCall, orderOfCallToExpire, millisBehindLatest);
 	}
 
+	public static KinesisProxyInterface initialNumOfRecordsAfterNumOfGetRecordsCallsWithAdaptiveReads(
+			final int numOfRecords,
+			final int numOfGetRecordsCalls,
+			final long millisBehindLatest) {
+		return new SingleShardEmittingAdaptiveNumOfRecordsKinesis(numOfRecords, numOfGetRecordsCalls,
+				millisBehindLatest);
+	}
+
 	private static class SingleShardEmittingFixNumOfRecordsWithExpiredIteratorKinesis extends SingleShardEmittingFixNumOfRecordsKinesis {
 
 		private final long millisBehindLatest;
@@ -227,6 +236,104 @@ public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSe
 
 	}
 
+	private static class SingleShardEmittingAdaptiveNumOfRecordsKinesis implements
+			KinesisProxyInterface {
+
+		protected final int totalNumOfGetRecordsCalls;
+
+		protected final int totalNumOfRecords;
+
+		private final long millisBehindLatest;
+
+		protected final Map<String, List<Record>> shardItrToRecordBatch;
+
+		protected static long averageRecordSizeBytes;
+
+		private static final long KINESIS_SHARD_BYTES_PER_SECOND_LIMIT = 2 * 1024L * 1024L;
+
+		public SingleShardEmittingAdaptiveNumOfRecordsKinesis(final int numOfRecords,
+				final int numOfGetRecordsCalls,
+				final long millisBehindLatest) {
+			this.totalNumOfRecords = numOfRecords;
+			this.totalNumOfGetRecordsCalls = numOfGetRecordsCalls;
+			this.millisBehindLatest = millisBehindLatest;
+			this.averageRecordSizeBytes = 0L;
+
+			// initialize the record batches that we will be fetched
+			this.shardItrToRecordBatch = new HashMap<>();
+
+			int numOfAlreadyPartitionedRecords = 0;
+			int numOfRecordsPerBatch = numOfRecords;
+			for (int batch = 0; batch < totalNumOfGetRecordsCalls; batch++) {
+					shardItrToRecordBatch.put(
+							String.valueOf(batch),
+							createRecordBatchWithRange(
+									numOfAlreadyPartitionedRecords,
+									numOfAlreadyPartitionedRecords + numOfRecordsPerBatch));
+					numOfAlreadyPartitionedRecords += numOfRecordsPerBatch;
+
+				numOfRecordsPerBatch = (int) (KINESIS_SHARD_BYTES_PER_SECOND_LIMIT /
+						(averageRecordSizeBytes * 1000L / ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_INTERVAL_MILLIS));
+			}
+		}
+
+		@Override
+		public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) {
+			// assuming that the maxRecordsToGet is always large enough
+			return new GetRecordsResult()
+					.withRecords(shardItrToRecordBatch.get(shardIterator))
+					.withMillisBehindLatest(millisBehindLatest)
+					.withNextShardIterator(
+							(Integer.valueOf(shardIterator) == totalNumOfGetRecordsCalls - 1)
+									? null : String
+									.valueOf(Integer.valueOf(shardIterator) + 1)); // last next shard iterator is null
+		}
+
+		@Override
+		public String getShardIterator(StreamShardHandle shard, String shardIteratorType,
+				Object startingMarker) {
+			// this will be called only one time per ShardConsumer;
+			// so, simply return the iterator of the first batch of records
+			return "0";
+		}
+
+		@Override
+		public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds) {
+			return null;
+		}
+
+		public static List<Record> createRecordBatchWithRange(int min, int max) {
+			List<Record> batch = new LinkedList<>();
+			long	sumRecordBatchBytes = 0L;
+			// Create record of size 10Kb
+			String data = createDataSize(10 * 1024L);
+
+			for (int i = min; i < max; i++) {
+				Record record = new Record()
+								.withData(
+										ByteBuffer.wrap(String.valueOf(data).getBytes(ConfigConstants.DEFAULT_CHARSET)))
+								.withPartitionKey(UUID.randomUUID().toString())
+								.withApproximateArrivalTimestamp(new Date(System.currentTimeMillis()))
+								.withSequenceNumber(String.valueOf(i));
+				batch.add(record);
+				sumRecordBatchBytes += record.getData().remaining();
+
+			}
+			if (batch.size() != 0) {
+				averageRecordSizeBytes = sumRecordBatchBytes / batch.size();
+			}
+
+			return batch;
+		}
+
+		private static String createDataSize(long msgSize) {
+			char[] data = new char[(int) msgSize];
+			return new String(data);
+
+		}
+
+	}
+
 	private static class NonReshardedStreamsKinesis implements KinesisProxyInterface {
 
 		private Map<String, List<StreamShardHandle>> streamsWithListOfShards = new HashMap<>();
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
index b7cfb2d32d1..21588c9a7a7 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
@@ -72,7 +72,7 @@ public TestableKinesisDataFetcher(
 			thrownErrorUnderTest,
 			subscribedShardsStateUnderTest,
 			subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
-			fakeKinesis);
+			(properties) -> fakeKinesis);
 
 		this.runWaiter = new OneShotLatch();
 		this.initialDiscoveryWaiter = new OneShotLatch();
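
The one-line test change above, passing `(properties) -> fakeKinesis` instead of the fake itself, suggests the fetcher now accepts a factory from consumer Properties to a KinesisProxyInterface rather than a ready-built proxy, which is what lets tests inject a fake with a lambda. A generic sketch of that seam under that assumption (interface and class names below are illustrative, not the connector's):

    import java.util.Properties;

    final class ProxyFactoryExample {

        /** Trimmed-down stand-in for a Kinesis facade such as KinesisProxyInterface. */
        interface KinesisFacade {
            String getShardIterator(String shard, String iteratorType, Object startingMarker);
        }

        /** Functional factory: production code builds a real client, tests hand in a lambda. */
        @FunctionalInterface
        interface KinesisFacadeFactory {
            KinesisFacade create(Properties consumerConfig);
        }

        static final class Fetcher {
            private final KinesisFacade kinesis;

            Fetcher(Properties consumerConfig, KinesisFacadeFactory factory) {
                // the fetcher no longer constructs the client itself; it asks the injected factory
                this.kinesis = factory.create(consumerConfig);
            }
        }

        public static void main(String[] args) {
            KinesisFacade fake = (shard, type, marker) -> "0"; // test double
            new Fetcher(new Properties(), properties -> fake); // mirrors "(properties) -> fakeKinesis"
        }
    }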
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
index c4bfa17675c..ae2d21e9493 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
@@ -125,14 +125,10 @@ public void testCorrectlySetRegionInProducerConfiguration() {
 		assertEquals("incorrect region", region, kpc.getRegion());
 	}
 
-	// ----------------------------------------------------------------------
-	// validateAwsConfiguration() tests
-	// ----------------------------------------------------------------------
-
 	@Test
-	public void testMissingAwsRegionInConfig() {
-		String expectedMessage = String.format("Either AWS region ('%s') or AWS endpoint ('%s') must be set in the config.",
-			AWSConfigConstants.AWS_REGION, AWSConfigConstants.AWS_REGION);
+	public void testMissingAwsRegionInProducerConfig() {
+		String expectedMessage = String.format("For FlinkKinesisProducer AWS region ('%s') must be set in the config.",
+				AWSConfigConstants.AWS_REGION);
 		exception.expect(IllegalArgumentException.class);
 		exception.expectMessage(expectedMessage);
 
@@ -140,9 +136,13 @@ public void testMissingAwsRegionInConfig() {
 		testConfig.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "accessKey");
 		testConfig.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secretKey");
 
-		KinesisConfigUtil.validateAwsConfiguration(testConfig);
+		KinesisConfigUtil.getValidatedProducerConfiguration(testConfig);
 	}
 
+	// ----------------------------------------------------------------------
+	// validateAwsConfiguration() tests
+	// ----------------------------------------------------------------------
+
 	@Test
 	public void testUnrecognizableAwsRegionInConfig() {
 		exception.expect(IllegalArgumentException.class);
@@ -156,22 +156,6 @@ public void testUnrecognizableAwsRegionInConfig() {
 		KinesisConfigUtil.validateAwsConfiguration(testConfig);
 	}
 
-	@Test
-	public void testAwsRegionOrEndpointInConfig() {
-		String expectedMessage = String.format("Either AWS region ('%s') or AWS endpoint ('%s') must be set in the config.",
-			AWSConfigConstants.AWS_REGION, AWSConfigConstants.AWS_REGION);
-		exception.expect(IllegalArgumentException.class);
-		exception.expectMessage(expectedMessage);
-
-		Properties testConfig = new Properties();
-		testConfig.setProperty(AWSConfigConstants.AWS_REGION, "us-east");
-		testConfig.setProperty(AWSConfigConstants.AWS_ENDPOINT, "fake");
-		testConfig.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "accessKey");
-		testConfig.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secretKey");
-
-		KinesisConfigUtil.validateAwsConfiguration(testConfig);
-	}
-
 	@Test
 	public void testCredentialProviderTypeSetToBasicButNoCredentialSetInConfig() {
 		exception.expect(IllegalArgumentException.class);
@@ -200,6 +184,22 @@ public void testUnrecognizableCredentialProviderTypeInConfig() {
 	// validateConsumerConfiguration() tests
 	// ----------------------------------------------------------------------
 
+	@Test
+	public void testAwsRegionOrEndpointInConsumerConfig() {
+		String expectedMessage = String.format("For FlinkKinesisConsumer either AWS region ('%s') or AWS endpoint ('%s') must be set in the config.",
+				AWSConfigConstants.AWS_REGION, AWSConfigConstants.AWS_ENDPOINT);
+		exception.expect(IllegalArgumentException.class);
+		exception.expectMessage(expectedMessage);
+
+		Properties testConfig = new Properties();
+		testConfig.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1");
+		testConfig.setProperty(AWSConfigConstants.AWS_ENDPOINT, "fake");
+		testConfig.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "accessKey");
+		testConfig.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secretKey");
+
+		KinesisConfigUtil.validateConsumerConfiguration(testConfig);
+	}
+
 	@Test
 	public void testUnrecognizableStreamInitPositionTypeInConfig() {
 		exception.expect(IllegalArgumentException.class);
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java
index e4b2bd2fa3a..2e61a285330 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseTestingClusterAutostarter.java
@@ -24,7 +24,6 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -36,10 +35,6 @@
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.ScannerCallable;
-import org.apache.hadoop.hbase.ipc.AbstractRpcClient;
-import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
@@ -149,9 +144,6 @@ private static Configuration initialize(Configuration conf) {
 	@BeforeClass
 	public static void setUp() throws Exception {
 		LOG.info("HBase minicluster: Starting");
-		((Log4JLogger) RpcServer.LOG).getLogger().setLevel(Level.ALL);
-		((Log4JLogger) AbstractRpcClient.LOG).getLogger().setLevel(Level.ALL);
-		((Log4JLogger) ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
 
 		TEST_UTIL.startMiniCluster(1);
 
diff --git a/flink-connectors/pom.xml b/flink-connectors/pom.xml
index 782d2beb66c..3ff139829fc 100644
--- a/flink-connectors/pom.xml
+++ b/flink-connectors/pom.xml
@@ -50,6 +50,7 @@ under the License.
 		<module>flink-connector-elasticsearch</module>
 		<module>flink-connector-elasticsearch2</module>
 		<module>flink-connector-elasticsearch5</module>
+		<module>flink-connector-elasticsearch6</module>
 		<module>flink-connector-rabbitmq</module>
 		<module>flink-connector-twitter</module>
 		<module>flink-connector-nifi</module>
diff --git a/flink-container/docker/README.md b/flink-container/docker/README.md
index 644b31c7a1a..3ff70c6b3bd 100644
--- a/flink-container/docker/README.md
+++ b/flink-container/docker/README.md
@@ -1,40 +1,80 @@
-# Apache Flink job cluster deployment on docker using docker-compose
+# Apache Flink job cluster Docker image
 
-## Installation
+In order to deploy a job cluster on Docker, one needs to create an image which contains the Flink binaries as well as the user code jars of the job to execute.
+This directory contains a `build.sh` which facilitates the process.
+The script takes the Flink binaries from an official release, a distribution archive, or a locally built distribution and combines them with the specified job jar.  
 
-Install the most recent stable version of docker
-https://docs.docker.com/installation/
+## Installing Docker
 
-## Build
+Install the most recent stable version of [Docker](https://docs.docker.com/installation/).
 
-Images are based on the official Java Alpine (OpenJDK 8) image. If you want to
-build the flink image run:
+## Building the Docker image
 
-    build.sh --from-local-dist --job-jar /path/to/job/jar/job.jar --image-name flink:job
+Images are based on the official Java Alpine (OpenJDK 8) image.
 
-If you want to build the container for a specific version of flink/hadoop/scala
-you can configure it in the respective args:
+Before building the image, one needs to build the user code jars for the job.
+Assume that the job jar is stored under `<PATH_TO_JOB_JAR>`.
 
-    docker build --build-arg FLINK_VERSION=1.6.0 --build-arg HADOOP_VERSION=28 --build-arg SCALA_VERSION=2.11 -t "flink:1.6.0-hadoop2.8-scala_2.11" flink
+If you want to build the Flink image from the version you have checked out locally run:
 
-## Deploy
+    build.sh --from-local-dist --job-jar <PATH_TO_JOB_JAR> --image-name <IMAGE_NAME>
+    
+Note that you first need to call `mvn package -pl flink-dist -am` to build the Flink binaries.
 
-- Deploy cluster and see config/setup log output (best run in a screen session)
+If you want to build the Flink image from an archive stored under `<PATH_TO_ARCHIVE>` run:
 
-        docker-compose up
+    build.sh --from-archive <PATH_TO_ARCHIVE> --job-jar <PATH_TO_JOB_JAR> --image-name <IMAGE_NAME>
 
-- Deploy as a daemon (and return)
+If you want to build the Flink image for a specific version of Flink/Hadoop/Scala run:
 
-        docker-compose up -d
+    build.sh --from-release --flink-version 1.6.0 --hadoop-version 2.8 --scala-version 2.11 --image-name <IMAGE_NAME>
+    
+The script will try to download the released version from the Apache archive.
 
-- Scale the cluster up or down to *N* TaskManagers
+## Deploying via Docker compose
+
+The `docker-compose.yml` contains the following parameters:
+
+* `FLINK_DOCKER_IMAGE_NAME` - Image name to use for the deployment (default: `flink-job:latest`)
+* `FLINK_JOB` - Name of the Flink job to execute (default: none)
+* `DEFAULT_PARALLELISM` - Default parallelism with which to start the job (default: 1)
+* `FLINK_JOB_ARGUMENTS` - Additional arguments which will be passed to the job cluster (default: none)
+* `SAVEPOINT_OPTIONS` - Savepoint options to start the cluster with (default: none)
+
+The parameters can be set by exporting the corresponding environment variable.
+
+Deploy cluster and see config/setup log output (best run in a screen session)
+
+        FLINK_DOCKER_IMAGE_NAME=<IMAGE_NAME> FLINK_JOB=<JOB_NAME> docker-compose up
+
+Deploy as a daemon (and return)
+
+        FLINK_DOCKER_IMAGE_NAME=<IMAGE_NAME> FLINK_JOB=<JOB_NAME> docker-compose up -d
+        
+In order to start the job with a different default parallelism, set `DEFAULT_PARALLELISM`. 
+This will automatically start `DEFAULT_PARALLELISM` TaskManagers:
+        
+        FLINK_DOCKER_IMAGE_NAME=<IMAGE_NAME> FLINK_JOB=<JOB_NAME> DEFAULT_PARALLELISM=<DEFAULT_PARALLELISM> docker-compose up
+        
+In order to resume the job from a savepoint, set `SAVEPOINT_OPTIONS`.
+Supported options are `--fromSavepoint <SAVEPOINT_PATH>` and `--allowNonRestoredState` where `<SAVEPOINT_PATH>` is accessible from all containers.
+
+        FLINK_DOCKER_IMAGE_NAME=<IMAGE_NAME> FLINK_JOB=<JOB_NAME> SAVEPOINT_OPTIONS="--fromSavepoint <SAVEPOINT_PATH> --allowNonRestoredState" docker-compose up 
+        
+One can also provide additional job arguments via `FLINK_JOB_ARGUMENTS` which are passed to the job:
+        
+        FLINK_DOCKER_IMAGE_NAME=<IMAGE_NAME> FLINK_JOB=<JOB_NAME> FLINK_JOB_ARGUMENTS=<JOB_ARGUMENTS> docker-compose up
+
+Scale the cluster up or down to *N* TaskManagers
 
         docker-compose scale taskmanager=<N>
 
-- Access the Job Manager container
+Access the Job Manager container
 
         docker exec -it $(docker ps --filter name=flink_jobmanager --format={{.ID}}) /bin/sh
+        
+Access the web UI by going to `<IP_DOCKER_MACHINE>:8081` in your web browser.
 
-- Kill the cluster
+Kill the cluster
 
         docker-compose kill
diff --git a/flink-container/docker/docker-compose.yml b/flink-container/docker/docker-compose.yml
index 81e4c8c8a54..a5e9b49f60c 100644
--- a/flink-container/docker/docker-compose.yml
+++ b/flink-container/docker/docker-compose.yml
@@ -16,16 +16,24 @@
 # limitations under the License.
 ################################################################################
 
-# Set the FLINK_DOCKER_IMAGE_NAME environment variable to override the image name to use
+# Docker compose file for a Flink job cluster deployment.
+#
+# Parameters:
+# * FLINK_DOCKER_IMAGE_NAME - Image name to use for the deployment (default: flink-job:latest)
+# * FLINK_JOB - Name of the Flink job to execute (default: none)
+# * DEFAULT_PARALLELISM - Default parallelism with which to start the job (default: 1)
+# * FLINK_JOB_ARGUMENTS - Additional arguments which will be passed to the job cluster (default: none)
+# * SAVEPOINT_OPTIONS - Savepoint options to start the cluster with (default: none)
 
-version: "2.1"
+version: "2.2"
 services:
   job-cluster:
     image: ${FLINK_DOCKER_IMAGE_NAME:-flink-job}
     ports:
       - "8081:8081"
-    command: job-cluster --job-classname ${FLINK_JOB} -Djobmanager.rpc.address=job-cluster
+    command: job-cluster --job-classname ${FLINK_JOB} -Djobmanager.rpc.address=job-cluster -Dparallelism.default=${DEFAULT_PARALLELISM:-1} ${SAVEPOINT_OPTIONS} ${FLINK_JOB_ARGUMENTS}
 
   taskmanager:
     image: ${FLINK_DOCKER_IMAGE_NAME:-flink-job}
     command: task-manager -Djobmanager.rpc.address=job-cluster
+    scale: ${DEFAULT_PARALLELISM:-1}
diff --git a/flink-container/kubernetes/README.md b/flink-container/kubernetes/README.md
index 9e0449616e2..8a663f691f9 100644
--- a/flink-container/kubernetes/README.md
+++ b/flink-container/kubernetes/README.md
@@ -20,7 +20,9 @@ The files contain the following variables:
 - `${FLINK_JOB_PARALLELISM}`: Degree of parallelism with which to start the Flink job and the number of required task managers
 
 One way to substitute the variables is to use `envsubst`.
-See [here]((https://stackoverflow.com/a/23622446/4815083)) for a guide to install it on Mac OS X.
+See [here](https://stackoverflow.com/a/23622446/4815083) for a guide to install it on Mac OS X.
+
+Alternatively, copy the template files (suffixed with `*.template`) and replace the variables.
 
 In non HA mode, you should first start the job cluster service:
 
@@ -28,20 +30,28 @@ In non HA mode, you should first start the job cluster service:
 
 In order to deploy the job cluster entrypoint run:
 
-`FLINK_IMAGE_NAME=<job-image> FLINK_JOB=<job-name> FLINK_JOB_PARALLELISM=<parallelism> envsubst < job-cluster-job.yaml.template | kubectl create -f -`
+`FLINK_IMAGE_NAME=<IMAGE_NAME> FLINK_JOB=<JOB_NAME> FLINK_JOB_PARALLELISM=<PARALLELISM> envsubst < job-cluster-job.yaml.template | kubectl create -f -`
 
 Now you should see the `flink-job-cluster` job being started by calling `kubectl get job`.
 
 At last, you should start the task manager deployment:
 
-`FLINK_IMAGE_NAME=<job-image> FLINK_JOB_PARALLELISM=<parallelism> envsubst < task-manager-deployment.yaml.template | kubectl create -f -`
+`FLINK_IMAGE_NAME=<IMAGE_NAME> FLINK_JOB_PARALLELISM=<PARALLELISM> envsubst < task-manager-deployment.yaml.template | kubectl create -f -`
+
+## Resuming from a savepoint
+
+In order to resume from a savepoint, one needs to pass the savepoint path to the cluster entrypoint.
+This can be achieved by adding `"--fromSavepoint", "<SAVEPOINT_PATH>"` to the `args` field in the [job-cluster-job.yaml.template](job-cluster-job.yaml.template).
+Note that `<SAVEPOINT_PATH>` needs to be accessible from the `job-cluster-job` pod (e.g. by adding it to the image or by storing it on a DFS).
+Additionally, one can specify `"--allowNonRestoredState"` to allow skipping savepoint state that cannot be restored.
 
 ## Interact with Flink job cluster
 
-After starting the job cluster service, the web UI will be available under `<NodeIP>:30081`.
+After starting the job cluster service, the web UI will be available under `<NODE_IP>:30081`.
+In the case of Minikube, `<NODE_IP>` is the output of `minikube ip`.
 You can then use the Flink client to send Flink commands to the cluster:
 
-`bin/flink list -m <NodeIP:30081>`
+`bin/flink list -m <NODE_IP:30081>`
 
 ## Terminate Flink job cluster
 
diff --git a/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfiguration.java b/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfiguration.java
index e68e74b80a4..d8ad5aba8ea 100644
--- a/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfiguration.java
+++ b/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfiguration.java
@@ -19,8 +19,10 @@
 package org.apache.flink.container.entrypoint;
 
 import org.apache.flink.runtime.entrypoint.EntrypointClusterConfiguration;
+import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
 
 import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 
 import java.util.Properties;
 
@@ -28,16 +30,26 @@
  * Configuration for the {@link StandaloneJobClusterEntryPoint}.
  */
 final class StandaloneJobClusterConfiguration extends EntrypointClusterConfiguration {
+
 	@Nonnull
 	private final String jobClassName;
 
-	public StandaloneJobClusterConfiguration(@Nonnull String configDir, @Nonnull Properties dynamicProperties, @Nonnull String[] args, int restPort, @Nonnull String jobClassName) {
-		super(configDir, dynamicProperties, args, restPort);
+	@Nonnull
+	private final SavepointRestoreSettings savepointRestoreSettings;
+
+	public StandaloneJobClusterConfiguration(@Nonnull String configDir, @Nonnull Properties dynamicProperties, @Nonnull String[] args, @Nullable String hostname, int restPort, @Nonnull String jobClassName, @Nonnull SavepointRestoreSettings savepointRestoreSettings) {
+		super(configDir, dynamicProperties, args, hostname, restPort);
 		this.jobClassName = jobClassName;
+		this.savepointRestoreSettings = savepointRestoreSettings;
 	}
 
 	@Nonnull
 	String getJobClassName() {
 		return jobClassName;
 	}
+
+	@Nonnull
+	public SavepointRestoreSettings getSavepointRestoreSettings() {
+		return savepointRestoreSettings;
+	}
 }
diff --git a/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactory.java b/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactory.java
index c0cb4739725..17217eff018 100644
--- a/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactory.java
+++ b/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactory.java
@@ -18,7 +18,9 @@
 
 package org.apache.flink.container.entrypoint;
 
+import org.apache.flink.client.cli.CliFrontendParser;
 import org.apache.flink.runtime.entrypoint.parser.ParserResultFactory;
+import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
@@ -30,6 +32,7 @@
 
 import static org.apache.flink.runtime.entrypoint.parser.CommandLineOptions.CONFIG_DIR_OPTION;
 import static org.apache.flink.runtime.entrypoint.parser.CommandLineOptions.DYNAMIC_PROPERTY_OPTION;
+import static org.apache.flink.runtime.entrypoint.parser.CommandLineOptions.HOST_OPTION;
 import static org.apache.flink.runtime.entrypoint.parser.CommandLineOptions.REST_PORT_OPTION;
 
 /**
@@ -53,6 +56,8 @@ public Options getOptions() {
 		options.addOption(REST_PORT_OPTION);
 		options.addOption(JOB_CLASS_NAME_OPTION);
 		options.addOption(DYNAMIC_PROPERTY_OPTION);
+		options.addOption(CliFrontendParser.SAVEPOINT_PATH_OPTION);
+		options.addOption(CliFrontendParser.SAVEPOINT_ALLOW_NON_RESTORED_OPTION);
 
 		return options;
 	}
@@ -63,13 +68,17 @@ public StandaloneJobClusterConfiguration createResult(@Nonnull CommandLine comma
 		final Properties dynamicProperties = commandLine.getOptionProperties(DYNAMIC_PROPERTY_OPTION.getOpt());
 		final String restPortString = commandLine.getOptionValue(REST_PORT_OPTION.getOpt(), "-1");
 		final int restPort = Integer.parseInt(restPortString);
+		final String hostname = commandLine.getOptionValue(HOST_OPTION.getOpt());
 		final String jobClassName = commandLine.getOptionValue(JOB_CLASS_NAME_OPTION.getOpt());
+		final SavepointRestoreSettings savepointRestoreSettings = CliFrontendParser.createSavepointRestoreSettings(commandLine);
 
 		return new StandaloneJobClusterConfiguration(
 			configDir,
 			dynamicProperties,
 			commandLine.getArgs(),
+			hostname,
 			restPort,
-			jobClassName);
+			jobClassName,
+			savepointRestoreSettings);
 	}
 }
diff --git a/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPoint.java b/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPoint.java
index 47cca4c7d85..78afa48880f 100644
--- a/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPoint.java
+++ b/flink-container/src/main/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPoint.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.container.entrypoint;
 
+import org.apache.flink.api.common.JobID;
 import org.apache.flink.client.program.PackagedProgram;
 import org.apache.flink.client.program.PackagedProgramUtils;
 import org.apache.flink.client.program.ProgramInvocationException;
@@ -33,7 +34,9 @@
 import org.apache.flink.runtime.heartbeat.HeartbeatServices;
 import org.apache.flink.runtime.highavailability.HighAvailabilityServices;
 import org.apache.flink.runtime.jobgraph.JobGraph;
+import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
 import org.apache.flink.runtime.metrics.MetricRegistry;
+import org.apache.flink.runtime.metrics.groups.JobManagerMetricGroup;
 import org.apache.flink.runtime.resourcemanager.ResourceManager;
 import org.apache.flink.runtime.resourcemanager.ResourceManagerConfiguration;
 import org.apache.flink.runtime.resourcemanager.ResourceManagerRuntimeServices;
@@ -51,20 +54,33 @@
 
 import java.util.concurrent.CompletableFuture;
 
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
 /**
  * {@link JobClusterEntrypoint} which is started with a job in a predefined
  * location.
  */
 public final class StandaloneJobClusterEntryPoint extends JobClusterEntrypoint {
 
-	private  static final String[] EMPTY_ARGS = new String[0];
+	static final JobID FIXED_JOB_ID = new JobID(0L, 0L);
+
+	private final String[] programArguments;
 
 	@Nonnull
 	private final String jobClassName;
 
-	StandaloneJobClusterEntryPoint(Configuration configuration, @Nonnull String jobClassName) {
+	@Nonnull
+	private final SavepointRestoreSettings savepointRestoreSettings;
+
+	StandaloneJobClusterEntryPoint(
+			Configuration configuration,
+			@Nonnull String jobClassName,
+			@Nonnull SavepointRestoreSettings savepointRestoreSettings,
+			@Nonnull String[] programArguments) {
 		super(configuration);
-		this.jobClassName = jobClassName;
+		this.programArguments = checkNotNull(programArguments);
+		this.jobClassName = checkNotNull(jobClassName);
+		this.savepointRestoreSettings = savepointRestoreSettings;
 	}
 
 	@Override
@@ -72,8 +88,9 @@ protected JobGraph retrieveJobGraph(Configuration configuration) throws FlinkExc
 		final PackagedProgram packagedProgram = createPackagedProgram();
 		final int defaultParallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
 		try {
-			final JobGraph jobGraph = PackagedProgramUtils.createJobGraph(packagedProgram, configuration, defaultParallelism);
+			final JobGraph jobGraph = PackagedProgramUtils.createJobGraph(packagedProgram, configuration, defaultParallelism, FIXED_JOB_ID);
 			jobGraph.setAllowQueuedScheduling(true);
+			jobGraph.setSavepointRestoreSettings(savepointRestoreSettings);
 
 			return jobGraph;
 		} catch (Exception e) {
@@ -84,7 +101,7 @@ protected JobGraph retrieveJobGraph(Configuration configuration) throws FlinkExc
 	private PackagedProgram createPackagedProgram() throws FlinkException {
 		try {
 			final Class<?> mainClass = getClass().getClassLoader().loadClass(jobClassName);
-			return new PackagedProgram(mainClass, EMPTY_ARGS);
+			return new PackagedProgram(mainClass, programArguments);
 		} catch (ClassNotFoundException | ProgramInvocationException e) {
 			throw new FlinkException("Could not load the provied entrypoint class.", e);
 		}
@@ -105,7 +122,8 @@ protected void registerShutdownActions(CompletableFuture<ApplicationStatus> term
 			MetricRegistry metricRegistry,
 			FatalErrorHandler fatalErrorHandler,
 			ClusterInformation clusterInformation,
-			@Nullable String webInterfaceUrl) throws Exception {
+			@Nullable String webInterfaceUrl,
+			JobManagerMetricGroup jobManagerMetricGroup) throws Exception {
 		final ResourceManagerConfiguration resourceManagerConfiguration = ResourceManagerConfiguration.fromConfiguration(configuration);
 		final ResourceManagerRuntimeServicesConfiguration resourceManagerRuntimeServicesConfiguration = ResourceManagerRuntimeServicesConfiguration.fromConfiguration(configuration);
 		final ResourceManagerRuntimeServices resourceManagerRuntimeServices = ResourceManagerRuntimeServices.fromConfiguration(
@@ -124,7 +142,8 @@ protected void registerShutdownActions(CompletableFuture<ApplicationStatus> term
 			metricRegistry,
 			resourceManagerRuntimeServices.getJobLeaderIdService(),
 			clusterInformation,
-			fatalErrorHandler);
+			fatalErrorHandler,
+			jobManagerMetricGroup);
 	}
 
 	public static void main(String[] args) {
@@ -140,7 +159,7 @@ public static void main(String[] args) {
 			clusterConfiguration = commandLineParser.parse(args);
 		} catch (FlinkParseException e) {
 			LOG.error("Could not parse command line arguments {}.", args, e);
-			commandLineParser.printHelp();
+			commandLineParser.printHelp(StandaloneJobClusterEntryPoint.class.getSimpleName());
 			System.exit(1);
 		}
 
@@ -148,7 +167,11 @@ public static void main(String[] args) {
 
 		configuration.setString(ClusterEntrypoint.EXECUTION_MODE, ExecutionMode.DETACHED.toString());
 
-		StandaloneJobClusterEntryPoint entrypoint = new StandaloneJobClusterEntryPoint(configuration, clusterConfiguration.getJobClassName());
+		StandaloneJobClusterEntryPoint entrypoint = new StandaloneJobClusterEntryPoint(
+			configuration,
+			clusterConfiguration.getJobClassName(),
+			clusterConfiguration.getSavepointRestoreSettings(),
+			clusterConfiguration.getArgs());
 
 		entrypoint.startCluster();
 	}
diff --git a/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactoryTest.java b/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactoryTest.java
index 1f39a0609e7..4d36e497bfa 100644
--- a/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactoryTest.java
+++ b/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterConfigurationParserFactoryTest.java
@@ -20,6 +20,7 @@
 
 import org.apache.flink.runtime.entrypoint.FlinkParseException;
 import org.apache.flink.runtime.entrypoint.parser.CommandLineParser;
+import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
 import org.apache.flink.util.TestLogger;
 
 import org.junit.Test;
@@ -38,28 +39,30 @@
 public class StandaloneJobClusterConfigurationParserFactoryTest extends TestLogger {
 
 	private static final CommandLineParser<StandaloneJobClusterConfiguration> commandLineParser = new CommandLineParser<>(new StandaloneJobClusterConfigurationParserFactory());
+	private static final String JOB_CLASS_NAME = "foobar";
+	private static final String CONFIG_DIR = "/foo/bar";
 
 	@Test
 	public void testEntrypointClusterConfigurationParsing() throws FlinkParseException {
-		final String configDir = "/foo/bar";
 		final String key = "key";
 		final String value = "value";
 		final int restPort = 1234;
-		final String jobClassName = "foobar";
 		final String arg1 = "arg1";
 		final String arg2 = "arg2";
-		final String[] args = {"--configDir", configDir, "--webui-port", String.valueOf(restPort), "--job-classname", jobClassName, String.format("-D%s=%s", key, value), arg1, arg2};
+		final String[] args = {"--configDir", CONFIG_DIR, "--webui-port", String.valueOf(restPort), "--job-classname", JOB_CLASS_NAME, String.format("-D%s=%s", key, value), arg1, arg2};
 
 		final StandaloneJobClusterConfiguration clusterConfiguration = commandLineParser.parse(args);
 
-		assertThat(clusterConfiguration.getConfigDir(), is(equalTo(configDir)));
-		assertThat(clusterConfiguration.getJobClassName(), is(equalTo(jobClassName)));
+		assertThat(clusterConfiguration.getConfigDir(), is(equalTo(CONFIG_DIR)));
+		assertThat(clusterConfiguration.getJobClassName(), is(equalTo(JOB_CLASS_NAME)));
 		assertThat(clusterConfiguration.getRestPort(), is(equalTo(restPort)));
 		final Properties dynamicProperties = clusterConfiguration.getDynamicProperties();
 
 		assertThat(dynamicProperties, hasEntry(key, value));
 
 		assertThat(clusterConfiguration.getArgs(), arrayContaining(arg1, arg2));
+
+		assertThat(clusterConfiguration.getSavepointRestoreSettings(), is(equalTo(SavepointRestoreSettings.none())));
 	}
 
 	@Test
@@ -81,4 +84,17 @@ public void testMissingRequiredArgument() throws FlinkParseException {
 
 		commandLineParser.parse(args);
 	}
+
+	@Test
+	public void testSavepointRestoreSettingsParsing() throws FlinkParseException {
+		final String restorePath = "foobar";
+		final String[] args = {"-c", CONFIG_DIR, "-j", JOB_CLASS_NAME, "-s", restorePath, "-n"};
+		final StandaloneJobClusterConfiguration standaloneJobClusterConfiguration = commandLineParser.parse(args);
+
+		final SavepointRestoreSettings savepointRestoreSettings = standaloneJobClusterConfiguration.getSavepointRestoreSettings();
+
+		assertThat(savepointRestoreSettings.restoreSavepoint(), is(true));
+		assertThat(savepointRestoreSettings.getRestorePath(), is(equalTo(restorePath)));
+		assertThat(savepointRestoreSettings.allowNonRestoredState(), is(true));
+	}
 }
diff --git a/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPointTest.java b/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPointTest.java
index 360799d19a8..07bcb63f48f 100644
--- a/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPointTest.java
+++ b/flink-container/src/test/java/org/apache/flink/container/entrypoint/StandaloneJobClusterEntryPointTest.java
@@ -21,13 +21,16 @@
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.configuration.CoreOptions;
 import org.apache.flink.runtime.jobgraph.JobGraph;
+import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
 import org.apache.flink.util.FlinkException;
 import org.apache.flink.util.TestLogger;
 
 import org.junit.Test;
 
+import static org.apache.flink.container.entrypoint.StandaloneJobClusterEntryPoint.FIXED_JOB_ID;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 /**
@@ -35,6 +38,8 @@
  */
 public class StandaloneJobClusterEntryPointTest extends TestLogger {
 
+	public static final String[] PROGRAM_ARGUMENTS = {"--arg", "suffix"};
+
 	@Test
 	public void testJobGraphRetrieval() throws FlinkException {
 		final Configuration configuration = new Configuration();
@@ -42,12 +47,30 @@ public void testJobGraphRetrieval() throws FlinkException {
 		configuration.setInteger(CoreOptions.DEFAULT_PARALLELISM, parallelism);
 		final StandaloneJobClusterEntryPoint standaloneJobClusterEntryPoint = new StandaloneJobClusterEntryPoint(
 			configuration,
-			TestJob.class.getCanonicalName());
+			TestJob.class.getCanonicalName(),
+			SavepointRestoreSettings.none(),
+			PROGRAM_ARGUMENTS);
 
 		final JobGraph jobGraph = standaloneJobClusterEntryPoint.retrieveJobGraph(configuration);
 
-		assertThat(jobGraph.getName(), is(equalTo(TestJob.class.getCanonicalName())));
+		assertThat(jobGraph.getName(), is(equalTo(TestJob.class.getCanonicalName() + "-suffix")));
 		assertThat(jobGraph.getMaximumParallelism(), is(parallelism));
+		assertEquals(jobGraph.getJobID(), FIXED_JOB_ID);
 	}
 
+	@Test
+	public void testSavepointRestoreSettings() throws FlinkException {
+		final Configuration configuration = new Configuration();
+		final SavepointRestoreSettings savepointRestoreSettings = SavepointRestoreSettings.forPath("foobar", true);
+		final StandaloneJobClusterEntryPoint jobClusterEntryPoint = new StandaloneJobClusterEntryPoint(
+			configuration,
+			TestJob.class.getCanonicalName(),
+			savepointRestoreSettings,
+			PROGRAM_ARGUMENTS);
+
+		final JobGraph jobGraph = jobClusterEntryPoint.retrieveJobGraph(configuration);
+
+		assertThat(jobGraph.getSavepointRestoreSettings(), is(equalTo(savepointRestoreSettings)));
+		assertEquals(jobGraph.getJobID(), FIXED_JOB_ID);
+	}
 }
diff --git a/flink-container/src/test/java/org/apache/flink/container/entrypoint/TestJob.java b/flink-container/src/test/java/org/apache/flink/container/entrypoint/TestJob.java
index 5f8857fc35f..ada434dd8b7 100644
--- a/flink-container/src/test/java/org/apache/flink/container/entrypoint/TestJob.java
+++ b/flink-container/src/test/java/org/apache/flink/container/entrypoint/TestJob.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.container.entrypoint;
 
+import org.apache.flink.api.java.utils.ParameterTool;
 import org.apache.flink.streaming.api.datastream.DataStreamSource;
 import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
@@ -35,6 +36,7 @@ public static void main(String[] args) throws Exception {
 		final SingleOutputStreamOperator<Integer> mapper = source.map(element -> 2 * element);
 		mapper.addSink(new DiscardingSink<>());
 
-		env.execute(TestJob.class.getCanonicalName());
+		ParameterTool parameterTool = ParameterTool.fromArgs(args);
+		env.execute(TestJob.class.getCanonicalName() + "-" + parameterTool.getRequired("arg"));
 	}
 }
diff --git a/flink-contrib/docker-flink/docker-entrypoint.sh b/flink-contrib/docker-flink/docker-entrypoint.sh
index 1fc13cf06eb..1edf99ca6ab 100755
--- a/flink-contrib/docker-flink/docker-entrypoint.sh
+++ b/flink-contrib/docker-flink/docker-entrypoint.sh
@@ -30,7 +30,7 @@ elif [ "$1" == "jobmanager" ]; then
     sed -i -e "s/jobmanager.rpc.address: localhost/jobmanager.rpc.address: ${JOB_MANAGER_RPC_ADDRESS}/g" $FLINK_HOME/conf/flink-conf.yaml
 
     echo "config file: " && grep '^[^\n#]' $FLINK_HOME/conf/flink-conf.yaml
-    exec $FLINK_HOME/bin/jobmanager.sh start-foreground cluster
+    exec $FLINK_HOME/bin/jobmanager.sh start-foreground
 elif [ "$1" == "taskmanager" ]; then
 
     sed -i -e "s/jobmanager.rpc.address: localhost/jobmanager.rpc.address: ${JOB_MANAGER_RPC_ADDRESS}/g" $FLINK_HOME/conf/flink-conf.yaml
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
index 59fa803791a..6b7caaac6ec 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
@@ -22,6 +22,7 @@
 import org.apache.flink.annotation.Public;
 import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.restartstrategy.RestartStrategies;
+import org.apache.flink.configuration.MetricOptions;
 import org.apache.flink.configuration.TaskManagerOptions;
 import org.apache.flink.util.Preconditions;
 
@@ -131,7 +132,9 @@
 	/**
 	 * Interval in milliseconds for sending latency tracking marks from the sources to the sinks.
 	 */
-	private long latencyTrackingInterval = 2000L;
+	private long latencyTrackingInterval = MetricOptions.LATENCY_INTERVAL.defaultValue();
+
+	private boolean isLatencyTrackingConfigured = false;
 
 	/**
 	 * @deprecated Should no longer be used because it is subsumed by RestartStrategyConfiguration
@@ -234,8 +237,6 @@ public long getAutoWatermarkInterval()  {
 	 * Interval for sending latency tracking marks from the sources to the sinks.
 	 * Flink will send latency tracking marks from the sources at the specified interval.
 	 *
-	 * Recommended value: 2000 (2 seconds).
-	 *
 	 * Setting a tracking interval <= 0 disables the latency tracking.
 	 *
 	 * @param interval Interval in milliseconds.
@@ -243,6 +244,7 @@ public long getAutoWatermarkInterval()  {
 	@PublicEvolving
 	public ExecutionConfig setLatencyTrackingInterval(long interval) {
 		this.latencyTrackingInterval = interval;
+		this.isLatencyTrackingConfigured = true;
 		return this;
 	}
 
@@ -256,12 +258,17 @@ public long getLatencyTrackingInterval() {
 	}
 
 	/**
-	 * Returns if latency tracking is enabled
-	 * @return True, if the tracking is enabled, false otherwise.
+	 * @deprecated will be removed in a future version
 	 */
 	@PublicEvolving
+	@Deprecated
 	public boolean isLatencyTrackingEnabled() {
-		return latencyTrackingInterval > 0;
+		return isLatencyTrackingConfigured && latencyTrackingInterval > 0;
+	}
+
+	@Internal
+	public boolean isLatencyTrackingConfigured() {
+		return isLatencyTrackingConfigured;
 	}
 
 	/**
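
For reference, a minimal sketch of how a user job would set the latency tracking interval explicitly; the class name and interval value below are illustrative only. If the interval is never set, the default from `MetricOptions.LATENCY_INTERVAL` applies.

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class LatencyTrackingExample {

        public static void main(String[] args) {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            // Calling the setter marks latency tracking as explicitly configured;
            // without this call, the MetricOptions.LATENCY_INTERVAL default is used.
            env.getConfig().setLatencyTrackingInterval(5000L);
        }
    }
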
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/serialization/BulkWriter.java b/flink-core/src/main/java/org/apache/flink/api/common/serialization/BulkWriter.java
new file mode 100644
index 00000000000..44f5fbe987a
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/api/common/serialization/BulkWriter.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.api.common.serialization;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.core.fs.FSDataOutputStream;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * An encoder that encodes data in a bulk fashion, encoding many records together at a time.
+ *
+ * <p>Examples for bulk encoding are most compressed formats, including formats like
+ * Parquet and ORC which encode batches of records into blocks of column vectors.
+ *
+ * <p>The bulk encoder may be stateful and is bound to a single stream during its
+ * lifetime.
+ *
+ * @param <T> The type of the elements encoded through this encoder.
+ */
+@PublicEvolving
+public interface BulkWriter<T> {
+
+	/**
+	 * Adds an element to the encoder. The encoder may temporarily buffer the element,
+	 * or immediately write it to the stream.
+	 *
+	 * <p>It may be that adding this element fills up an internal buffer and causes the
+	 * encoding and flushing of a batch of internally buffered elements.
+	 *
+	 * @param element The element to add.
+	 * @throws IOException Thrown, if the element cannot be added to the encoder,
+	 *                     or if the output stream throws an exception.
+	 */
+	void addElement(T element) throws IOException;
+
+	/**
+	 * Flushes all intermediate buffered data to the output stream.
+	 * Note that frequent flushing may reduce the efficiency of the encoding.
+	 *
+	 * @throws IOException Thrown if the encoder cannot be flushed, or if the output
+	 *                     stream throws an exception.
+	 */
+	void flush() throws IOException;
+
+	/**
+	 * Finishes the writing. This must flush all internal buffers, finish encoding, and write
+	 * footers.
+	 *
+	 * <p>The writer is not expected to handle any more records via {@link #addElement(Object)} after
+	 * this method is called.
+	 *
+	 * <p><b>Important:</b> This method MUST NOT close the stream that the writer writes to.
+	 * Closing the stream is expected to happen through the invoker of this method afterwards.
+	 *
+	 * @throws IOException Thrown if the finalization fails.
+	 */
+	void finish() throws IOException;
+
+	// ------------------------------------------------------------------------
+
+	/**
+	 * A factory that creates a {@link BulkWriter}.
+	 * @param <T> The type of record to write.
+	 */
+	@FunctionalInterface
+	interface Factory<T> extends Serializable {
+
+		/**
+		 * Creates a writer that writes to the given stream.
+		 *
+		 * @param out The output stream to write the encoded data to.
+		 * @throws IOException Thrown if the writer cannot be opened, or if the output
+		 *                     stream throws an exception.
+		 */
+		BulkWriter<T> create(FSDataOutputStream out) throws IOException;
+	}
+}
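
To illustrate the contract above, a minimal sketch of a `BulkWriter` that encodes every record as a UTF-8 line; the `LineBulkWriter` name and the line-based format are assumptions made for the example, not part of this change.

    import org.apache.flink.api.common.serialization.BulkWriter;
    import org.apache.flink.core.fs.FSDataOutputStream;

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    /** Writes each record as one UTF-8 encoded line. */
    public class LineBulkWriter implements BulkWriter<String> {

        private final FSDataOutputStream out;

        LineBulkWriter(FSDataOutputStream out) {
            this.out = out;
        }

        @Override
        public void addElement(String element) throws IOException {
            // Write immediately; a real bulk format would typically buffer a batch first.
            out.write((element + "\n").getBytes(StandardCharsets.UTF_8));
        }

        @Override
        public void flush() throws IOException {
            out.flush();
        }

        @Override
        public void finish() throws IOException {
            // Flush remaining data but do not close the stream, as required by the contract.
            out.flush();
        }

        /** Factory that creates a writer for a given output stream. */
        public static class Factory implements BulkWriter.Factory<String> {
            @Override
            public BulkWriter<String> create(FSDataOutputStream out) {
                return new LineBulkWriter(out);
            }
        }
    }
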
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java b/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
index 6eb8ddcd649..9b0094d50cd 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
@@ -37,6 +37,12 @@
  * <p>To create keyed map state (on a KeyedStream), use
  * {@link org.apache.flink.api.common.functions.RuntimeContext#getMapState(MapStateDescriptor)}.
  *
+ * <p>Note: The map state with TTL currently supports {@code null} user values
+ * only if the user value serializer can handle {@code null} values.
+ * If the serializer does not support {@code null} values,
+ * it can be wrapped with {@link org.apache.flink.api.java.typeutils.runtime.NullableSerializer}
+ * at the cost of an extra byte in the serialized form.
+ *
  * @param <UK> The type of the keys that can be added to the map state.
  */
 @PublicEvolving
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java b/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
index 191eb6f907a..422d77f9f52 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
@@ -96,7 +96,7 @@
 
 	/** Name for queries against state created from this StateDescriptor. */
 	@Nonnull
-	private StateTtlConfiguration ttlConfig = StateTtlConfiguration.DISABLED;
+	private StateTtlConfig ttlConfig = StateTtlConfig.DISABLED;
 
 	/** The default value returned by the state when no other value is bound to a key. */
 	@Nullable
@@ -210,7 +210,7 @@ public T getDefaultValue() {
 	 */
 	public void setQueryable(String queryableStateName) {
 		Preconditions.checkArgument(
-			ttlConfig.getTtlUpdateType() == StateTtlConfiguration.TtlUpdateType.Disabled,
+			ttlConfig.getUpdateType() == StateTtlConfig.UpdateType.Disabled,
 			"Queryable state is currently not supported with TTL");
 		if (this.queryableStateName == null) {
 			this.queryableStateName = Preconditions.checkNotNull(queryableStateName, "Registration name");
@@ -243,14 +243,14 @@ public boolean isQueryable() {
 	 * Configures optional activation of state time-to-live (TTL).
 	 *
 	 * <p>State user value will expire, become unavailable and be cleaned up in storage
-	 * depending on configured {@link StateTtlConfiguration}.
+	 * depending on configured {@link StateTtlConfig}.
 	 *
 	 * @param ttlConfig configuration of state TTL
 	 */
-	public void enableTimeToLive(StateTtlConfiguration ttlConfig) {
+	public void enableTimeToLive(StateTtlConfig ttlConfig) {
 		Preconditions.checkNotNull(ttlConfig);
 		Preconditions.checkArgument(
-			ttlConfig.getTtlUpdateType() != StateTtlConfiguration.TtlUpdateType.Disabled &&
+			ttlConfig.getUpdateType() != StateTtlConfig.UpdateType.Disabled &&
 				queryableStateName == null,
 			"Queryable state is currently not supported with TTL");
 		this.ttlConfig = ttlConfig;
@@ -258,7 +258,7 @@ public void enableTimeToLive(StateTtlConfiguration ttlConfig) {
 
 	@Nonnull
 	@Internal
-	public StateTtlConfiguration getTtlConfig() {
+	public StateTtlConfig getTtlConfig() {
 		return ttlConfig;
 	}
 
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java b/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
new file mode 100644
index 00000000000..42eaea4c482
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfig.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.api.common.state;
+
+import org.apache.flink.api.common.time.Time;
+import org.apache.flink.util.Preconditions;
+
+import javax.annotation.Nonnull;
+
+import java.io.Serializable;
+import java.util.EnumMap;
+
+import static org.apache.flink.api.common.state.StateTtlConfig.StateVisibility.NeverReturnExpired;
+import static org.apache.flink.api.common.state.StateTtlConfig.TimeCharacteristic.ProcessingTime;
+import static org.apache.flink.api.common.state.StateTtlConfig.UpdateType.OnCreateAndWrite;
+
+/**
+ * Configuration of state TTL logic.
+ *
+ * <p>Note: The map state with TTL currently supports {@code null} user values
+ * only if the user value serializer can handle {@code null} values.
+ * If the serializer does not support {@code null} values,
+ * it can be wrapped with {@link org.apache.flink.api.java.typeutils.runtime.NullableSerializer}
+ * at the cost of an extra byte in the serialized form.
+ */
+public class StateTtlConfig implements Serializable {
+
+	private static final long serialVersionUID = -7592693245044289793L;
+
+	public static final StateTtlConfig DISABLED =
+		newBuilder(Time.milliseconds(Long.MAX_VALUE)).setUpdateType(UpdateType.Disabled).build();
+
+	/**
+	 * This option value configures when to update last access timestamp which prolongs state TTL.
+	 */
+	public enum UpdateType {
+		/** TTL is disabled. State does not expire. */
+		Disabled,
+		/** Last access timestamp is initialised when state is created and updated on every write operation. */
+		OnCreateAndWrite,
+		/** The same as <code>OnCreateAndWrite</code> but also updated on read. */
+		OnReadAndWrite
+	}
+
+	/**
+	 * This option configures whether expired user value can be returned or not.
+	 */
+	public enum StateVisibility {
+		/** Return expired user value if it is not cleaned up yet. */
+		ReturnExpiredIfNotCleanedUp,
+		/** Never return expired user value. */
+		NeverReturnExpired
+	}
+
+	/**
+	 * This option configures time scale to use for ttl.
+	 */
+	public enum TimeCharacteristic {
+		/** Processing time, see also <code>TimeCharacteristic.ProcessingTime</code>. */
+		ProcessingTime
+	}
+
+	private final UpdateType updateType;
+	private final StateVisibility stateVisibility;
+	private final TimeCharacteristic timeCharacteristic;
+	private final Time ttl;
+	private final CleanupStrategies cleanupStrategies;
+
+	private StateTtlConfig(
+		UpdateType updateType,
+		StateVisibility stateVisibility,
+		TimeCharacteristic timeCharacteristic,
+		Time ttl,
+		CleanupStrategies cleanupStrategies) {
+		this.updateType = Preconditions.checkNotNull(updateType);
+		this.stateVisibility = Preconditions.checkNotNull(stateVisibility);
+		this.timeCharacteristic = Preconditions.checkNotNull(timeCharacteristic);
+		this.ttl = Preconditions.checkNotNull(ttl);
+		this.cleanupStrategies = cleanupStrategies;
+		Preconditions.checkArgument(ttl.toMilliseconds() > 0,
+			"TTL is expected to be positive");
+	}
+
+	@Nonnull
+	public UpdateType getUpdateType() {
+		return updateType;
+	}
+
+	@Nonnull
+	public StateVisibility getStateVisibility() {
+		return stateVisibility;
+	}
+
+	@Nonnull
+	public Time getTtl() {
+		return ttl;
+	}
+
+	@Nonnull
+	public TimeCharacteristic getTimeCharacteristic() {
+		return timeCharacteristic;
+	}
+
+	public boolean isEnabled() {
+		return updateType != UpdateType.Disabled;
+	}
+
+	@Nonnull
+	public CleanupStrategies getCleanupStrategies() {
+		return cleanupStrategies;
+	}
+
+	@Override
+	public String toString() {
+		return "StateTtlConfig{" +
+			"updateType=" + updateType +
+			", stateVisibility=" + stateVisibility +
+			", timeCharacteristic=" + timeCharacteristic +
+			", ttl=" + ttl +
+			'}';
+	}
+
+	@Nonnull
+	public static Builder newBuilder(@Nonnull Time ttl) {
+		return new Builder(ttl);
+	}
+
+	/**
+	 * Builder for the {@link StateTtlConfig}.
+	 */
+	public static class Builder {
+
+		private UpdateType updateType = OnCreateAndWrite;
+		private StateVisibility stateVisibility = NeverReturnExpired;
+		private TimeCharacteristic timeCharacteristic = ProcessingTime;
+		private Time ttl;
+		private CleanupStrategies cleanupStrategies = new CleanupStrategies();
+
+		public Builder(@Nonnull Time ttl) {
+			this.ttl = ttl;
+		}
+
+		/**
+		 * Sets the ttl update type.
+		 *
+		 * @param updateType The ttl update type configures when to update last access timestamp which prolongs state TTL.
+		 */
+		@Nonnull
+		public Builder setUpdateType(UpdateType updateType) {
+			this.updateType = updateType;
+			return this;
+		}
+
+		@Nonnull
+		public Builder updateTtlOnCreateAndWrite() {
+			return setUpdateType(UpdateType.OnCreateAndWrite);
+		}
+
+		@Nonnull
+		public Builder updateTtlOnReadAndWrite() {
+			return setUpdateType(UpdateType.OnReadAndWrite);
+		}
+
+		/**
+		 * Sets the state visibility.
+		 *
+		 * @param stateVisibility The state visibility configures whether expired user value can be returned or not.
+		 */
+		@Nonnull
+		public Builder setStateVisibility(@Nonnull StateVisibility stateVisibility) {
+			this.stateVisibility = stateVisibility;
+			return this;
+		}
+
+		@Nonnull
+		public Builder returnExpiredIfNotCleanedUp() {
+			return setStateVisibility(StateVisibility.ReturnExpiredIfNotCleanedUp);
+		}
+
+		@Nonnull
+		public Builder neverReturnExpired() {
+			return setStateVisibility(StateVisibility.NeverReturnExpired);
+		}
+
+		/**
+		 * Sets the time characteristic.
+		 *
+		 * @param timeCharacteristic The time characteristic configures time scale to use for ttl.
+		 */
+		@Nonnull
+		public Builder setTimeCharacteristic(@Nonnull TimeCharacteristic timeCharacteristic) {
+			this.timeCharacteristic = timeCharacteristic;
+			return this;
+		}
+
+		@Nonnull
+		public Builder useProcessingTime() {
+			return setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
+		}
+
+		/** Cleanup expired state in full snapshot on checkpoint. */
+		@Nonnull
+		public Builder cleanupFullSnapshot() {
+			cleanupStrategies.strategies.put(
+				CleanupStrategies.Strategies.FULL_STATE_SCAN_SNAPSHOT,
+				new CleanupStrategies.CleanupStrategy() {  });
+			return this;
+		}
+
+		/**
+		 * Sets the ttl time.
+		 * @param ttl The ttl time.
+		 */
+		@Nonnull
+		public Builder setTtl(@Nonnull Time ttl) {
+			this.ttl = ttl;
+			return this;
+		}
+
+		@Nonnull
+		public StateTtlConfig build() {
+			return new StateTtlConfig(
+				updateType,
+				stateVisibility,
+				timeCharacteristic,
+				ttl,
+				cleanupStrategies);
+		}
+	}
+
+	/**
+	 * TTL cleanup strategies.
+	 *
+	 * <p>This class configures when to clean up expired state with TTL.
+	 * By default, state is always cleaned up on explicit read access if found expired.
+	 * Currently, cleanup during a full snapshot can additionally be activated.
+	 */
+	public static class CleanupStrategies implements Serializable {
+		private static final long serialVersionUID = -1617740467277313524L;
+
+		/** Fixed strategies ordinals in {@code strategies} config field. */
+		enum Strategies {
+			FULL_STATE_SCAN_SNAPSHOT
+		}
+
+		/** Base interface for cleanup strategies configurations. */
+		interface CleanupStrategy extends Serializable {
+
+		}
+
+		final EnumMap<Strategies, CleanupStrategy> strategies = new EnumMap<>(Strategies.class);
+
+		public boolean inFullSnapshot() {
+			return strategies.containsKey(Strategies.FULL_STATE_SCAN_SNAPSHOT);
+		}
+	}
+}
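
As a usage sketch of the new builder API together with `StateDescriptor#enableTimeToLive`; the state name, value type, and TTL duration are arbitrary choices for illustration.

    import org.apache.flink.api.common.state.StateTtlConfig;
    import org.apache.flink.api.common.state.ValueStateDescriptor;
    import org.apache.flink.api.common.time.Time;

    public class TtlConfigExample {

        public static ValueStateDescriptor<Long> lastSeenDescriptor() {
            // Refresh the last-access timestamp on create and write, never return expired
            // values, and additionally clean up expired entries in full snapshots.
            StateTtlConfig ttlConfig = StateTtlConfig
                .newBuilder(Time.minutes(10))
                .updateTtlOnCreateAndWrite()
                .neverReturnExpired()
                .cleanupFullSnapshot()
                .build();

            ValueStateDescriptor<Long> descriptor =
                new ValueStateDescriptor<>("last-seen", Long.class);
            descriptor.enableTimeToLive(ttlConfig);
            return descriptor;
        }
    }
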
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfiguration.java b/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfiguration.java
deleted file mode 100644
index 55ec29c19aa..00000000000
--- a/flink-core/src/main/java/org/apache/flink/api/common/state/StateTtlConfiguration.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.api.common.state;
-
-import org.apache.flink.api.common.time.Time;
-import org.apache.flink.util.Preconditions;
-
-import java.io.Serializable;
-
-import static org.apache.flink.api.common.state.StateTtlConfiguration.TtlStateVisibility.NeverReturnExpired;
-import static org.apache.flink.api.common.state.StateTtlConfiguration.TtlTimeCharacteristic.ProcessingTime;
-import static org.apache.flink.api.common.state.StateTtlConfiguration.TtlUpdateType.OnCreateAndWrite;
-
-/**
- * Configuration of state TTL logic.
- */
-public class StateTtlConfiguration implements Serializable {
-
-	private static final long serialVersionUID = -7592693245044289793L;
-
-	public static final StateTtlConfiguration DISABLED =
-		newBuilder(Time.milliseconds(Long.MAX_VALUE)).setTtlUpdateType(TtlUpdateType.Disabled).build();
-
-	/**
-	 * This option value configures when to update last access timestamp which prolongs state TTL.
-	 */
-	public enum TtlUpdateType {
-		/** TTL is disabled. State does not expire. */
-		Disabled,
-		/** Last access timestamp is initialised when state is created and updated on every write operation. */
-		OnCreateAndWrite,
-		/** The same as <code>OnCreateAndWrite</code> but also updated on read. */
-		OnReadAndWrite
-	}
-
-	/**
-	 * This option configures whether expired user value can be returned or not.
-	 */
-	public enum TtlStateVisibility {
-		/** Return expired user value if it is not cleaned up yet. */
-		ReturnExpiredIfNotCleanedUp,
-		/** Never return expired user value. */
-		NeverReturnExpired
-	}
-
-	/**
-	 * This option configures time scale to use for ttl.
-	 */
-	public enum TtlTimeCharacteristic {
-		/** Processing time, see also <code>TimeCharacteristic.ProcessingTime</code>. */
-		ProcessingTime
-	}
-
-	private final TtlUpdateType ttlUpdateType;
-	private final TtlStateVisibility stateVisibility;
-	private final TtlTimeCharacteristic timeCharacteristic;
-	private final Time ttl;
-
-	private StateTtlConfiguration(
-		TtlUpdateType ttlUpdateType,
-		TtlStateVisibility stateVisibility,
-		TtlTimeCharacteristic timeCharacteristic,
-		Time ttl) {
-		this.ttlUpdateType = Preconditions.checkNotNull(ttlUpdateType);
-		this.stateVisibility = Preconditions.checkNotNull(stateVisibility);
-		this.timeCharacteristic = Preconditions.checkNotNull(timeCharacteristic);
-		this.ttl = Preconditions.checkNotNull(ttl);
-		Preconditions.checkArgument(ttl.toMilliseconds() > 0,
-			"TTL is expected to be positive");
-	}
-
-	public TtlUpdateType getTtlUpdateType() {
-		return ttlUpdateType;
-	}
-
-	public TtlStateVisibility getStateVisibility() {
-		return stateVisibility;
-	}
-
-	public Time getTtl() {
-		return ttl;
-	}
-
-	public TtlTimeCharacteristic getTimeCharacteristic() {
-		return timeCharacteristic;
-	}
-
-	public boolean isEnabled() {
-		return ttlUpdateType != TtlUpdateType.Disabled;
-	}
-
-	@Override
-	public String toString() {
-		return "StateTtlConfiguration{" +
-			"ttlUpdateType=" + ttlUpdateType +
-			", stateVisibility=" + stateVisibility +
-			", timeCharacteristic=" + timeCharacteristic +
-			", ttl=" + ttl +
-			'}';
-	}
-
-	public static Builder newBuilder(Time ttl) {
-		return new Builder(ttl);
-	}
-
-	/**
-	 * Builder for the {@link StateTtlConfiguration}.
-	 */
-	public static class Builder {
-
-		private TtlUpdateType ttlUpdateType = OnCreateAndWrite;
-		private TtlStateVisibility stateVisibility = NeverReturnExpired;
-		private TtlTimeCharacteristic timeCharacteristic = ProcessingTime;
-		private Time ttl;
-
-		public Builder(Time ttl) {
-			this.ttl = ttl;
-		}
-
-		/**
-		 * Sets the ttl update type.
-		 *
-		 * @param ttlUpdateType The ttl update type configures when to update last access timestamp which prolongs state TTL.
-		 */
-		public Builder setTtlUpdateType(TtlUpdateType ttlUpdateType) {
-			this.ttlUpdateType = ttlUpdateType;
-			return this;
-		}
-
-		public Builder updateTtlOnCreateAndWrite() {
-			return setTtlUpdateType(TtlUpdateType.OnCreateAndWrite);
-		}
-
-		public Builder updateTtlOnReadAndWrite() {
-			return setTtlUpdateType(TtlUpdateType.OnReadAndWrite);
-		}
-
-		/**
-		 * Sets the state visibility.
-		 *
-		 * @param stateVisibility The state visibility configures whether expired user value can be returned or not.
-		 */
-		public Builder setStateVisibility(TtlStateVisibility stateVisibility) {
-			this.stateVisibility = stateVisibility;
-			return this;
-		}
-
-		public Builder returnExpiredIfNotCleanedUp() {
-			return setStateVisibility(TtlStateVisibility.ReturnExpiredIfNotCleanedUp);
-		}
-
-		public Builder neverReturnExpired() {
-			return setStateVisibility(TtlStateVisibility.NeverReturnExpired);
-		}
-
-		/**
-		 * Sets the time characteristic.
-		 *
-		 * @param timeCharacteristic The time characteristic configures time scale to use for ttl.
-		 */
-		public Builder setTimeCharacteristic(TtlTimeCharacteristic timeCharacteristic) {
-			this.timeCharacteristic = timeCharacteristic;
-			return this;
-		}
-
-		public Builder useProcessingTime() {
-			return setTimeCharacteristic(TtlTimeCharacteristic.ProcessingTime);
-		}
-
-		/**
-		 * Sets the ttl time.
-		 * @param ttl The ttl time.
-		 */
-		public Builder setTtl(Time ttl) {
-			this.ttl = ttl;
-			return this;
-		}
-
-		public StateTtlConfiguration build() {
-			return new StateTtlConfiguration(
-				ttlUpdateType,
-				stateVisibility,
-				timeCharacteristic,
-				ttl
-			);
-		}
-
-	}
-}
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/NothingTypeInfo.java b/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/NothingTypeInfo.java
index 73b916fe948..307dfa0941b 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/NothingTypeInfo.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/NothingTypeInfo.java
@@ -53,7 +53,7 @@ public int getArity() {
 	@Override
 	@PublicEvolving
 	public int getTotalFields() {
-		return 0;
+		return 1;
 	}
 
 	@Override
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/TypeInformation.java b/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/TypeInformation.java
index c5c077fc2ca..e04a494675c 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/TypeInformation.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeinfo/TypeInformation.java
@@ -111,6 +111,7 @@
 	 * fields, in the case of composite types. In the example below, the OuterType type has three
 	 * fields in total.
 	 *
+	 * <p>The total number of fields must be at least 1.
 	 *
 	 * @return The number of fields in this type, including its sub-fields (for composite types)
 	 */
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/LongSerializer.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/LongSerializer.java
index cbdc3db7476..2ed2cec5e53 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/LongSerializer.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/LongSerializer.java
@@ -18,12 +18,12 @@
 
 package org.apache.flink.api.common.typeutils.base;
 
-import java.io.IOException;
-
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.core.memory.DataInputView;
 import org.apache.flink.core.memory.DataOutputView;
 
+import java.io.IOException;
+
 @Internal
 public final class LongSerializer extends TypeSerializerSingleton<Long> {
 
@@ -31,7 +31,7 @@
 	
 	public static final LongSerializer INSTANCE = new LongSerializer();
 	
-	private static final Long ZERO = Long.valueOf(0);
+	private static final Long ZERO = 0L;
 
 	@Override
 	public boolean isImmutableType() {
@@ -55,17 +55,17 @@ public Long copy(Long from, Long reuse) {
 
 	@Override
 	public int getLength() {
-		return 8;
+		return Long.BYTES;
 	}
 
 	@Override
 	public void serialize(Long record, DataOutputView target) throws IOException {
-		target.writeLong(record.longValue());
+		target.writeLong(record);
 	}
 
 	@Override
 	public Long deserialize(DataInputView source) throws IOException {
-		return Long.valueOf(source.readLong());
+		return source.readLong();
 	}
 	
 	@Override
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/tuple/Tuple.java b/flink-core/src/main/java/org/apache/flink/api/java/tuple/Tuple.java
index c282c59eac7..7ce38f8c14a 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/tuple/Tuple.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/tuple/Tuple.java
@@ -113,6 +113,38 @@
 
 	// BEGIN_OF_TUPLE_DEPENDENT_CODE
 	// GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator.
+	public static Tuple newInstance(int arity) {
+		switch (arity) {
+			case 0: return Tuple0.INSTANCE;
+			case 1: return new Tuple1();
+			case 2: return new Tuple2();
+			case 3: return new Tuple3();
+			case 4: return new Tuple4();
+			case 5: return new Tuple5();
+			case 6: return new Tuple6();
+			case 7: return new Tuple7();
+			case 8: return new Tuple8();
+			case 9: return new Tuple9();
+			case 10: return new Tuple10();
+			case 11: return new Tuple11();
+			case 12: return new Tuple12();
+			case 13: return new Tuple13();
+			case 14: return new Tuple14();
+			case 15: return new Tuple15();
+			case 16: return new Tuple16();
+			case 17: return new Tuple17();
+			case 18: return new Tuple18();
+			case 19: return new Tuple19();
+			case 20: return new Tuple20();
+			case 21: return new Tuple21();
+			case 22: return new Tuple22();
+			case 23: return new Tuple23();
+			case 24: return new Tuple24();
+			case 25: return new Tuple25();
+			default: throw new IllegalArgumentException("The tuple arity must be in [0, " + MAX_ARITY + "].");
+		}
+	}
+
 	private static final Class<?>[] CLASSES = new Class<?>[] {
 		Tuple0.class, Tuple1.class, Tuple2.class, Tuple3.class, Tuple4.class, Tuple5.class, Tuple6.class, Tuple7.class, Tuple8.class, Tuple9.class, Tuple10.class, Tuple11.class, Tuple12.class, Tuple13.class, Tuple14.class, Tuple15.class, Tuple16.class, Tuple17.class, Tuple18.class, Tuple19.class, Tuple20.class, Tuple21.class, Tuple22.class, Tuple23.class, Tuple24.class, Tuple25.class
 	};
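
A small illustration of the generated `newInstance` factory; the field values are arbitrary.

    import org.apache.flink.api.java.tuple.Tuple;

    public class TupleFactoryExample {

        public static void main(String[] args) {
            // Create a Tuple3 by arity and populate its fields positionally.
            Tuple tuple = Tuple.newInstance(3);
            tuple.setField("flink", 0);
            tuple.setField(1, 1);
            tuple.setField(2.0, 2);

            System.out.println(tuple.getArity() + " fields: " + tuple);
        }
    }
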
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractionUtils.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractionUtils.java
index f005ed9ee75..07f1e1e2ca4 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractionUtils.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractionUtils.java
@@ -158,6 +158,7 @@ public static LambdaExecutable checkAndExtractLambda(Function function) throws T
 	/**
 	 * Extracts type from given index from lambda. It supports nested types.
 	 *
+	 * @param baseClass SAM function that the lambda implements
 	 * @param exec lambda function to extract the type from
 	 * @param lambdaTypeArgumentIndices position of type to extract in type hierarchy
 	 * @param paramLen count of total parameters of the lambda (including closure parameters)
@@ -165,14 +166,17 @@ public static LambdaExecutable checkAndExtractLambda(Function function) throws T
 	 * @return extracted type
 	 */
 	public static Type extractTypeFromLambda(
+		Class<?> baseClass,
 		LambdaExecutable exec,
 		int[] lambdaTypeArgumentIndices,
 		int paramLen,
 		int baseParametersLen) {
 		Type output = exec.getParameterTypes()[paramLen - baseParametersLen + lambdaTypeArgumentIndices[0]];
 		for (int i = 1; i < lambdaTypeArgumentIndices.length; i++) {
+			validateLambdaType(baseClass, output);
 			output = extractTypeArgument(output, lambdaTypeArgumentIndices[i]);
 		}
+		validateLambdaType(baseClass, output);
 		return output;
 	}
 
@@ -328,4 +332,23 @@ public static boolean hasSuperclass(Class<?> clazz, String superClassName) {
 		}
 		return Object.class;
 	}
+
+	/**
+	 * Checks whether the given type has the generic parameters declared in the class definition.
+	 *
+	 * @param t type to be validated
+	 */
+	public static void validateLambdaType(Class<?> baseClass, Type t) {
+		if (!(t instanceof Class)) {
+			return;
+		}
+		final Class<?> clazz = (Class<?>) t;
+
+		if (clazz.getTypeParameters().length > 0) {
+			throw new InvalidTypesException("The generic type parameters of '" + clazz.getSimpleName() + "' are missing. "
+				+ "In many cases lambda methods don't provide enough information for automatic type extraction when Java generics are involved. "
+				+ "An easy workaround is to use an (anonymous) class instead that implements the '" + baseClass.getName() + "' interface. "
+				+ "Otherwise the type has to be specified explicitly using type information.");
+		}
+	}
 }
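
To make the new validation concrete, a sketch of the kind of lambda it guards against, together with the workaround the exception message suggests (supplying type information explicitly); the data and pipeline are illustrative only.

    import org.apache.flink.api.common.typeinfo.Types;
    import org.apache.flink.api.java.ExecutionEnvironment;
    import org.apache.flink.api.java.tuple.Tuple2;

    public class LambdaTypeInfoExample {

        public static void main(String[] args) throws Exception {
            ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

            // The lambda erases the generic parameters of Tuple2, so automatic extraction
            // cannot recover them; .returns(...) provides the type information explicitly.
            env.fromElements("to", "be", "or", "not")
                .map(word -> Tuple2.of(word, 1))
                .returns(Types.TUPLE(Types.STRING, Types.INT))
                .print();
        }
    }
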
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index f514384232f..07b6cfebbfa 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -84,6 +84,12 @@
 /**
  * A utility for reflection analysis on classes, to determine the return type of implementations of transformation
  * functions.
+ *
+ * <p>NOTES FOR USERS OF THIS CLASS:
+ * Automatic type extraction is a hacky business that depends on a lot of variables such as generics,
+ * compiler, interfaces, etc. The type extraction fails regularly with either {@link MissingTypeInfo} or
+ * hard exceptions. Whenever you use methods of this class, make sure to provide a way to pass custom
+ * type information as a fallback.
  */
 @Public
 public class TypeExtractor {
@@ -171,7 +177,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			MapFunction.class,
 			0,
 			1,
-			new int[]{0},
 			NO_INDEX,
 			inType,
 			functionName,
@@ -193,7 +198,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			FlatMapFunction.class,
 			0,
 			1,
-			new int[]{0},
 			new int[]{1, 0},
 			inType,
 			functionName,
@@ -222,7 +226,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			FoldFunction.class,
 			0,
 			1,
-			new int[]{1},
 			NO_INDEX,
 			inType,
 			functionName,
@@ -241,7 +244,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			AggregateFunction.class,
 			0,
 			1,
-			new int[]{0},
 			NO_INDEX,
 			inType,
 			functionName,
@@ -261,7 +263,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			0,
 			2,
 			NO_INDEX,
-			NO_INDEX,
 			inType,
 			functionName,
 			allowMissing);
@@ -281,7 +282,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			MapPartitionFunction.class,
 			0,
 			1,
-			new int[]{0, 0},
 			new int[]{1, 0},
 			inType,
 			functionName,
@@ -302,7 +302,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			GroupReduceFunction.class,
 			0,
 			1,
-			new int[]{0, 0},
 			new int[]{1, 0},
 			inType,
 			functionName,
@@ -323,7 +322,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			GroupCombineFunction.class,
 			0,
 			1,
-			new int[]{0, 0},
 			new int[]{1, 0},
 			inType,
 			functionName,
@@ -347,8 +345,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			0,
 			1,
 			2,
-			new int[]{0},
-			new int[]{1},
 			new int[]{2, 0},
 			in1Type,
 			in2Type,
@@ -373,8 +369,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			0,
 			1,
 			2,
-			new int[]{0},
-			new int[]{1},
 			NO_INDEX,
 			in1Type,
 			in2Type,
@@ -399,8 +393,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			0,
 			1,
 			2,
-			new int[]{0, 0},
-			new int[]{1, 0},
 			new int[]{2, 0},
 			in1Type,
 			in2Type,
@@ -425,8 +417,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			0,
 			1,
 			2,
-			new int[]{0},
-			new int[]{1},
 			NO_INDEX,
 			in1Type,
 			in2Type,
@@ -448,7 +438,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 			KeySelector.class,
 			0,
 			1,
-			new int[]{0},
 			NO_INDEX,
 			inType,
 			functionName,
@@ -465,46 +454,16 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 		Partitioner<T> partitioner,
 		String functionName,
 		boolean allowMissing) {
-		try {
-			final LambdaExecutable exec;
-			try {
-				exec = checkAndExtractLambda(partitioner);
-			} catch (TypeExtractionException e) {
-				throw new InvalidTypesException("Internal error occurred.", e);
-			}
-			if (exec != null) {
-				// check for lambda type erasure
-				validateLambdaGenericParameters(exec);
-
-				// parameters must be accessed from behind, since JVM can add additional parameters e.g. when using local variables inside lambda function
-				// paramLen is the total number of parameters of the provided lambda, it includes parameters added through closure
-				final int paramLen = exec.getParameterTypes().length;
 
-				final Method sam = TypeExtractionUtils.getSingleAbstractMethod(Partitioner.class);
-				// number of parameters the SAM of implemented interface has; the parameter indexing applies to this range
-				final int baseParametersLen = sam.getParameterTypes().length;
-
-				final Type keyType = TypeExtractionUtils.extractTypeFromLambda(
-					exec,
-					new int[]{0},
-					paramLen,
-					baseParametersLen);
-				return new TypeExtractor().privateCreateTypeInfo(keyType, null, null);
-			} else {
-				return new TypeExtractor().privateCreateTypeInfo(
-					Partitioner.class,
-					partitioner.getClass(),
-					0,
-					null,
-					null);
-			}
-		} catch (InvalidTypesException e) {
-			if (allowMissing) {
-				return (TypeInformation<T>) new MissingTypeInfo(functionName != null ? functionName : partitioner.toString(), e);
-			} else {
-				throw e;
-			}
-		}
+		return getUnaryOperatorReturnType(
+			partitioner,
+			Partitioner.class,
+			-1,
+			0,
+			new int[]{0},
+			null,
+			functionName,
+			allowMissing);
 	}
 
 
@@ -524,24 +483,43 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 	/**
 	 * Returns the unary operator's return type.
 	 *
-	 * <p><b>NOTE:</b> lambda type indices allow extraction of Type from lambdas. To extract input type <b>IN</b>
-	 * from the function given below one should pass {@code new int[] {0,1,0}} as lambdaInputTypeArgumentIndices.
+	 * <p>This method can extract a type in 4 different ways:
+	 *
+	 * <p>1. By using the generics of the base class like MyFunction<X, Y, Z, IN, OUT>.
+	 *    This is what outputTypeArgumentIndex (in this example "4") is good for.
+	 *
+	 * <p>2. By using input type inference SubMyFunction<T, String, String, String, T>.
+	 *    This is what inputTypeArgumentIndex (in this example "0") and inType is good for.
+	 *
+	 * <p>3. By using the static method that a compiler generates for Java lambdas.
+	 *    This is what lambdaOutputTypeArgumentIndices is good for. Given that MyFunction has
+	 *    the following single abstract method:
 	 *
 	 * <pre>
 	 * <code>
-	 * OUT apply(Map<String, List<IN>> value)
+	 * void apply(IN value, Collector<OUT> value)
 	 * </code>
 	 * </pre>
 	 *
+	 * <p> Lambda type indices allow the extraction of a type from lambdas. To extract the
+	 *     output type <b>OUT</b> from the function one should pass {@code new int[] {1, 0}}.
+	 *     "1" for selecting the parameter and 0 for the first generic in this type.
+	 *     Use {@code TypeExtractor.NO_INDEX} for selecting the return type of the lambda for
+	 *     extraction or if the class cannot be a lambda because it is not a single abstract
+	 *     method interface.
+	 *
+	 * <p>4. By using interfaces such as {@link TypeInfoFactory} or {@link ResultTypeQueryable}.
+	 *
+	 * <p>See also comments in the header of this class.
+	 *
 	 * @param function Function to extract the return type from
 	 * @param baseClass Base class of the function
-	 * @param inputTypeArgumentIndex Index of input type in the class specification
-	 * @param outputTypeArgumentIndex Index of output type in the class specification
-	 * @param lambdaInputTypeArgumentIndices Table of indices of the type argument specifying the input type. See example.
+	 * @param inputTypeArgumentIndex Index of input generic type in the base class specification (ignored if inType is null)
+	 * @param outputTypeArgumentIndex Index of output generic type in the base class specification
 	 * @param lambdaOutputTypeArgumentIndices Table of indices of the type argument specifying the input type. See example.
-	 * @param inType Type of the input elements (In case of an iterable, it is the element type)
+	 * @param inType Type of the input elements (In case of an iterable, it is the element type) or null
 	 * @param functionName Function name
-	 * @param allowMissing Can the type information be missing
+	 * @param allowMissing Can the type information be missing (this generates a MissingTypeInfo for postponing an exception)
 	 * @param <IN> Input type
 	 * @param <OUT> Output type
 	 * @return TypeInformation of the return type of the function
@@ -553,11 +531,23 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 		Class<?> baseClass,
 		int inputTypeArgumentIndex,
 		int outputTypeArgumentIndex,
-		int[] lambdaInputTypeArgumentIndices,
 		int[] lambdaOutputTypeArgumentIndices,
 		TypeInformation<IN> inType,
 		String functionName,
 		boolean allowMissing) {
+
+		Preconditions.checkArgument(inType == null || inputTypeArgumentIndex >= 0, "Input type argument index was not provided");
+		Preconditions.checkArgument(outputTypeArgumentIndex >= 0, "Output type argument index was not provided");
+		Preconditions.checkArgument(
+			lambdaOutputTypeArgumentIndices != null,
+			"Indices for output type arguments within lambda not provided");
+
+		// explicit result type has highest precedence
+		if (function instanceof ResultTypeQueryable) {
+			return ((ResultTypeQueryable<OUT>) function).getProducedType();
+		}
+
+		// perform extraction
 		try {
 			final LambdaExecutable exec;
 			try {
@@ -566,14 +556,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 				throw new InvalidTypesException("Internal error occurred.", e);
 			}
 			if (exec != null) {
-				Preconditions.checkArgument(
-					lambdaInputTypeArgumentIndices != null && lambdaInputTypeArgumentIndices.length >= 1,
-					"Indices for input type arguments within lambda not provided");
-				Preconditions.checkArgument(
-					lambdaOutputTypeArgumentIndices != null,
-					"Indices for output type arguments within lambda not provided");
-				// check for lambda type erasure
-				validateLambdaGenericParameters(exec);
 
 				// parameters must be accessed from behind, since JVM can add additional parameters e.g. when using local variables inside lambda function
 				// paramLen is the total number of parameters of the provided lambda, it includes parameters added through closure
@@ -584,43 +566,23 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 				// number of parameters the SAM of implemented interface has; the parameter indexing applies to this range
 				final int baseParametersLen = sam.getParameterTypes().length;
 
-				// executable references "this" implicitly
-				if (paramLen <= 0) {
-					// executable declaring class can also be a super class of the input type
-					// we only validate if the executable exists in input type
-					validateInputContainsExecutable(exec, inType);
-				}
-				else {
-					final Type input = TypeExtractionUtils.extractTypeFromLambda(
-						exec,
-						lambdaInputTypeArgumentIndices,
-						paramLen,
-						baseParametersLen);
-					validateInputType(input, inType);
-				}
-
-				if (function instanceof ResultTypeQueryable) {
-					return ((ResultTypeQueryable<OUT>) function).getProducedType();
-				}
-
 				final Type output;
 				if (lambdaOutputTypeArgumentIndices.length > 0) {
 					output = TypeExtractionUtils.extractTypeFromLambda(
+						baseClass,
 						exec,
 						lambdaOutputTypeArgumentIndices,
 						paramLen,
 						baseParametersLen);
 				} else {
 					output = exec.getReturnType();
+					TypeExtractionUtils.validateLambdaType(baseClass, output);
 				}
 
 				return new TypeExtractor().privateCreateTypeInfo(output, inType, null);
 			} else {
-				Preconditions.checkArgument(inputTypeArgumentIndex >= 0, "Input type argument index was not provided");
-				Preconditions.checkArgument(outputTypeArgumentIndex >= 0, "Output type argument index was not provided");
-				validateInputType(baseClass, function.getClass(), inputTypeArgumentIndex, inType);
-				if(function instanceof ResultTypeQueryable) {
-					return ((ResultTypeQueryable<OUT>) function).getProducedType();
+				if (inType != null) {
+					validateInputType(baseClass, function.getClass(), inputTypeArgumentIndex, inType);
 				}
 				return new TypeExtractor().privateCreateTypeInfo(baseClass, function.getClass(), outputTypeArgumentIndex, inType, null);
 			}
@@ -637,27 +599,45 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 	/**
 	 * Returns the binary operator's return type.
 	 *
-	 * <p><b>NOTE:</b> lambda type indices allows extraction of Type from lambdas. To extract input type <b>IN1</b>
-	 * from the function given below one should pass {@code new int[] {0,1,0}} as lambdaInput1TypeArgumentIndices.
+	 * <p>This method can extract a type in 4 different ways:
+	 *
+	 * <p>1. By using the generics of the base class like MyFunction<X, Y, Z, IN, OUT>.
+	 *    This is what outputTypeArgumentIndex (in this example "4") is good for.
+	 *
+	 * <p>2. By using input type inference SubMyFunction<T, String, String, String, T>.
+	 *    This is what inputTypeArgumentIndex (in this example "0") and inType is good for.
+	 *
+	 * <p>3. By using the static method that a compiler generates for Java lambdas.
+	 *    This is what lambdaOutputTypeArgumentIndices is good for. Given that MyFunction has
+	 *    the following single abstract method:
 	 *
 	 * <pre>
 	 * <code>
-	 * OUT apply(Map<String, List<IN1>> value1, List<IN2> value2)
+	 * void apply(IN value, Collector<OUT> value)
 	 * </code>
 	 * </pre>
 	 *
+	 * <p> Lambda type indices allow the extraction of a type from lambdas. To extract the
+	 *     output type <b>OUT</b> from the function one should pass {@code new int[] {1, 0}}.
+	 *     "1" for selecting the parameter and 0 for the first generic in this type.
+	 *     Use {@code TypeExtractor.NO_INDEX} for selecting the return type of the lambda for
+	 *     extraction or if the class cannot be a lambda because it is not a single abstract
+	 *     method interface.
+	 *
+	 * <p>4. By using interfaces such as {@link TypeInfoFactory} or {@link ResultTypeQueryable}.
+	 *
+	 * <p>See also comments in the header of this class.
+	 *
 	 * @param function Function to extract the return type from
 	 * @param baseClass Base class of the function
-	 * @param input1TypeArgumentIndex Index of first input type in the class specification
-	 * @param input2TypeArgumentIndex Index of second input type in the class specification
-	 * @param outputTypeArgumentIndex Index of output type in the class specification
-	 * @param lambdaInput1TypeArgumentIndices Table of indices of the type argument specifying the first input type. See example.
-	 * @param lambdaInput2TypeArgumentIndices Table of indices of the type argument specifying the second input type. See example.
+	 * @param input1TypeArgumentIndex Index of first input generic type in the class specification (ignored if in1Type is null)
+	 * @param input2TypeArgumentIndex Index of second input generic type in the class specification (ignored if in2Type is null)
+	 * @param outputTypeArgumentIndex Index of output generic type in the class specification
 	 * @param lambdaOutputTypeArgumentIndices Table of indices of the type argument specifying the output type. See example.
 	 * @param in1Type Type of the left side input elements (In case of an iterable, it is the element type)
 	 * @param in2Type Type of the right side input elements (In case of an iterable, it is the element type)
 	 * @param functionName Function name
-	 * @param allowMissing Can the type information be missing
+	 * @param allowMissing Can the type information be missing (this generates a MissingTypeInfo for postponing an exception)
 	 * @param <IN1> Left side input type
 	 * @param <IN2> Right side input type
 	 * @param <OUT> Output type
@@ -671,13 +651,25 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 		int input1TypeArgumentIndex,
 		int input2TypeArgumentIndex,
 		int outputTypeArgumentIndex,
-		int[] lambdaInput1TypeArgumentIndices,
-		int[] lambdaInput2TypeArgumentIndices,
 		int[] lambdaOutputTypeArgumentIndices,
 		TypeInformation<IN1> in1Type,
 		TypeInformation<IN2> in2Type,
 		String functionName,
 		boolean allowMissing) {
+
+		Preconditions.checkArgument(in1Type == null || input1TypeArgumentIndex >= 0, "Input 1 type argument index was not provided");
+		Preconditions.checkArgument(in2Type == null || input2TypeArgumentIndex >= 0, "Input 2 type argument index was not provided");
+		Preconditions.checkArgument(outputTypeArgumentIndex >= 0, "Output type argument index was not provided");
+		Preconditions.checkArgument(
+			lambdaOutputTypeArgumentIndices != null,
+			"Indices for output type arguments within lambda not provided");
+
+		// explicit result type has highest precedence
+		if (function instanceof ResultTypeQueryable) {
+			return ((ResultTypeQueryable<OUT>) function).getProducedType();
+		}
+
+		// perform extraction
 		try {
 			final LambdaExecutable exec;
 			try {
@@ -686,17 +678,6 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 				throw new InvalidTypesException("Internal error occurred.", e);
 			}
 			if (exec != null) {
-				Preconditions.checkArgument(
-					lambdaInput1TypeArgumentIndices != null && lambdaInput1TypeArgumentIndices.length >= 1,
-					"Indices for first input type arguments within lambda not provided");
-				Preconditions.checkArgument(
-					lambdaInput2TypeArgumentIndices != null && lambdaInput2TypeArgumentIndices.length >= 1,
-					"Indices for second input type arguments within lambda not provided");
-				Preconditions.checkArgument(
-					lambdaOutputTypeArgumentIndices != null,
-					"Indices for output type arguments within lambda not provided");
-				// check for lambda type erasure
-				validateLambdaGenericParameters(exec);
 
 				final Method sam = TypeExtractionUtils.getSingleAbstractMethod(baseClass);
 				final int baseParametersLen = sam.getParameterTypes().length;
@@ -704,32 +685,17 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 				// parameters must be accessed from behind, since JVM can add additional parameters e.g. when using local variables inside lambda function
 				final int paramLen = exec.getParameterTypes().length;
 
-				final Type input1 = TypeExtractionUtils.extractTypeFromLambda(
-					exec,
-					lambdaInput1TypeArgumentIndices,
-					paramLen,
-					baseParametersLen);
-				final Type input2 = TypeExtractionUtils.extractTypeFromLambda(
-					exec,
-					lambdaInput2TypeArgumentIndices,
-					paramLen,
-					baseParametersLen);
-
-				validateInputType(input1, in1Type);
-				validateInputType(input2, in2Type);
-				if(function instanceof ResultTypeQueryable) {
-					return ((ResultTypeQueryable<OUT>) function).getProducedType();
-				}
-
 				final Type output;
 				if (lambdaOutputTypeArgumentIndices.length > 0) {
 					output = TypeExtractionUtils.extractTypeFromLambda(
+						baseClass,
 						exec,
 						lambdaOutputTypeArgumentIndices,
 						paramLen,
 						baseParametersLen);
 				} else {
 					output = exec.getReturnType();
+					TypeExtractionUtils.validateLambdaType(baseClass, output);
 				}
 
 				return new TypeExtractor().privateCreateTypeInfo(
@@ -738,13 +704,11 @@ private static void registerFactory(Type t, Class<? extends TypeInfoFactory> fac
 					in2Type);
 			}
 			else {
-				Preconditions.checkArgument(input1TypeArgumentIndex >= 0, "Input 1 type argument index was not provided");
-				Preconditions.checkArgument(input2TypeArgumentIndex >= 0, "Input 2 type argument index was not provided");
-				Preconditions.checkArgument(outputTypeArgumentIndex >= 0, "Output type argument index was not provided");
-				validateInputType(baseClass, function.getClass(), input1TypeArgumentIndex, in1Type);
-				validateInputType(baseClass, function.getClass(), input2TypeArgumentIndex, in2Type);
-				if(function instanceof ResultTypeQueryable) {
-					return ((ResultTypeQueryable<OUT>) function).getProducedType();
+				if (in1Type != null) {
+					validateInputType(baseClass, function.getClass(), input1TypeArgumentIndex, in1Type);
+				}
+				if (in2Type != null) {
+					validateInputType(baseClass, function.getClass(), input2TypeArgumentIndex, in2Type);
 				}
 				return new TypeExtractor().privateCreateTypeInfo(baseClass, function.getClass(), outputTypeArgumentIndex, in1Type, in2Type);
 			}
@@ -915,9 +879,10 @@ else if (t instanceof TypeVariable) {
 					return typeInfo;
 				} else {
 					throw new InvalidTypesException("Type of TypeVariable '" + ((TypeVariable<?>) t).getName() + "' in '"
-							+ ((TypeVariable<?>) t).getGenericDeclaration() + "' could not be determined. This is most likely a type erasure problem. "
-							+ "The type extraction currently supports types with generic variables only in cases where "
-							+ "all variables in the return type can be deduced from the input type(s).");
+						+ ((TypeVariable<?>) t).getGenericDeclaration() + "' could not be determined. This is most likely a type erasure problem. "
+						+ "The type extraction currently supports types with generic variables only in cases where "
+						+ "all variables in the return type can be deduced from the input type(s). "
+						+ "Otherwise the type has to be specified explicitly using type information.");
 				}
 			}
 		}
@@ -1165,10 +1130,11 @@ else if (fieldType instanceof ParameterizedType || fieldType instanceof GenericA
 				// variable could not be determined
 				if (subTypesInfo[i] == null && !lenient) {
 					throw new InvalidTypesException("Type of TypeVariable '" + ((TypeVariable<?>) subtypes[i]).getName() + "' in '"
-							+ ((TypeVariable<?>) subtypes[i]).getGenericDeclaration()
-							+ "' could not be determined. This is most likely a type erasure problem. "
-							+ "The type extraction currently supports types with generic variables only in cases where "
-							+ "all variables in the return type can be deduced from the input type(s).");
+						+ ((TypeVariable<?>) subtypes[i]).getGenericDeclaration()
+						+ "' could not be determined. This is most likely a type erasure problem. "
+						+ "The type extraction currently supports types with generic variables only in cases where "
+						+ "all variables in the return type can be deduced from the input type(s). "
+						+ "Otherwise the type has to be specified explicitly using type information.");
 				}
 			} else {
 				// create the type information of the subtype or null/exception
@@ -1618,30 +1584,6 @@ private int countFieldsInClass(Class<?> clazz) {
 		return fieldCount;
 	}
 
-	private static void validateLambdaGenericParameters(LambdaExecutable exec) {
-		// check the arguments
-		for (Type t : exec.getParameterTypes()) {
-			validateLambdaGenericParameter(t);
-		}
-
-		// check the return type
-		validateLambdaGenericParameter(exec.getReturnType());
-	}
-
-	private static void validateLambdaGenericParameter(Type t) {
-		if(!(t instanceof Class)) {
-			return;
-		}
-		final Class<?> clazz = (Class<?>) t;
-
-		if(clazz.getTypeParameters().length > 0) {
-			throw new InvalidTypesException("The generic type parameters of '" + clazz.getSimpleName() + "' are missing. \n"
-					+ "It seems that your compiler has not stored them into the .class file. \n"
-					+ "Currently, only the Eclipse JDT compiler preserves the type information necessary to use the lambdas feature type-safely. \n"
-					+ "See the documentation for more information about how to compile jobs containing lambda expressions.");
-		}
-	}
-
 	/**
 	 * Tries to find a concrete value (Class, ParameterizedType etc. ) for a TypeVariable by traversing the type hierarchy downwards.
 	 * If a value could not be found it will return the most bottom type variable in the hierarchy.
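
   A note on the reworked getUnaryOperatorReturnType signature above: the lambda output indices
   follow the convention described in the new Javadoc. For FlatMapFunction the SAM is
   "void flatMap(IN value, Collector<OUT> out)", so new int[]{1, 0} selects parameter 1 (the
   Collector) and its first generic (OUT). A hedged sketch of a direct call, mirroring the
   getFlatMapReturnTypes registration in this patch (variable names are illustrative; the
   extraction still relies on the compiler emitting generic signatures for the lambda method,
   otherwise it fails with the new error message):

       FlatMapFunction<String, Integer> fn = (value, out) -> out.collect(value.length());

       TypeInformation<Integer> outType = TypeExtractor.getUnaryOperatorReturnType(
           fn,
           FlatMapFunction.class,
           0,                // index of IN in FlatMapFunction<IN, OUT>
           1,                // index of OUT in FlatMapFunction<IN, OUT>
           new int[]{1, 0},  // SAM parameter 1 (Collector<OUT>), generic 0 (OUT)
           Types.STRING,     // inType
           "myFlatMap",
           false);           // allowMissing: throw instead of returning MissingTypeInfo
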
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/NullableSerializer.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/NullableSerializer.java
new file mode 100644
index 00000000000..fe392e4b1cb
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/NullableSerializer.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.api.java.typeutils.runtime;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.typeutils.CompatibilityResult;
+import org.apache.flink.api.common.typeutils.CompatibilityUtil;
+import org.apache.flink.api.common.typeutils.CompositeTypeSerializerConfigSnapshot;
+import org.apache.flink.api.common.typeutils.TypeDeserializerAdapter;
+import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.common.typeutils.TypeSerializerConfigSnapshot;
+import org.apache.flink.api.common.typeutils.UnloadableDummyTypeSerializer;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.core.memory.DataInputDeserializer;
+import org.apache.flink.core.memory.DataInputView;
+import org.apache.flink.core.memory.DataOutputSerializer;
+import org.apache.flink.core.memory.DataOutputView;
+import org.apache.flink.util.Preconditions;
+
+import javax.annotation.Nonnull;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Serializer wrapper to add support of {@code null} value serialization.
+ *
+ * <p>If the target serializer does not support {@code null} values of its type,
+ * you can use this class to wrap this serializer.
+ * This is a generic treatment of {@code null} value serialization
+ * which comes with the cost of an additional byte in the final serialized value.
+ * The {@code NullableSerializer} will intercept the {@code null} value serialization case
+ * and prepend the target serialized value with a boolean flag marking whether it is {@code null} or not.
+ * <pre> {@code
+ * TypeSerializer<T> originalSerializer = ...;
+ * TypeSerializer<T> serializerWithNullValueSupport = NullableSerializer.wrap(originalSerializer, padNullValueIfFixedLen);
+ * // or
+ * TypeSerializer<T> serializerWithNullValueSupport = NullableSerializer.wrapIfNullIsNotSupported(originalSerializer, padNullValueIfFixedLen);
+ * }</pre>
+ *
+ * @param <T> type to serialize
+ */
+public class NullableSerializer<T> extends TypeSerializer<T> {
+	private static final long serialVersionUID = 3335569358214720033L;
+	private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
+
+	@Nonnull
+	private final TypeSerializer<T> originalSerializer;
+	private final byte[] padding;
+
+	private NullableSerializer(@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
+		this.originalSerializer = originalSerializer;
+		this.padding = createPadding(originalSerializer.getLength(), padNullValueIfFixedLen);
+	}
+
+	private static <T> byte[] createPadding(int originalSerializerLength, boolean padNullValueIfFixedLen) {
+		boolean padNullValue = originalSerializerLength > 0 && padNullValueIfFixedLen;
+		return padNullValue ? new byte[originalSerializerLength] : EMPTY_BYTE_ARRAY;
+	}
+
+	/**
+	 * This method tries to serialize a {@code null} value with the {@code originalSerializer}
+	 * and wraps the serializer if that fails, otherwise it returns the {@code originalSerializer} unchanged.
+	 *
+	 * @param originalSerializer serializer to wrap and add {@code null} support
+	 * @param padNullValueIfFixedLen pad null value to preserve the fixed length of original serializer
+	 * @return serializer which supports {@code null} values
+	 */
+	public static <T> TypeSerializer<T> wrapIfNullIsNotSupported(
+		@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
+		return checkIfNullSupported(originalSerializer) ?
+			originalSerializer : wrap(originalSerializer, padNullValueIfFixedLen);
+	}
+
+	/**
+	 * This method checks if {@code serializer} supports {@code null} value.
+	 *
+	 * @param serializer serializer to check
+	 */
+	public static <T> boolean checkIfNullSupported(@Nonnull TypeSerializer<T> serializer) {
+		int length = serializer.getLength() > 0 ? serializer.getLength() : 1;
+		DataOutputSerializer dos = new DataOutputSerializer(length);
+		try {
+			serializer.serialize(null, dos);
+		} catch (IOException | RuntimeException e) {
+			return false;
+		}
+		Preconditions.checkArgument(
+			serializer.getLength() < 0 || serializer.getLength() == dos.getCopyOfBuffer().length,
+			"The serialized form of the null value should have the same length " +
+				"as any other if the length is fixed in the serializer");
+		DataInputDeserializer dis = new DataInputDeserializer(dos.getSharedBuffer());
+		try {
+			Preconditions.checkArgument(serializer.deserialize(dis) == null);
+		} catch (IOException e) {
+			throw new RuntimeException(
+				String.format("Unexpected failure to deserialize just serialized null value with %s",
+					serializer.getClass().getName()), e);
+		}
+		Preconditions.checkArgument(
+			serializer.copy(null) == null,
+			"Serializer %s has to be able to properly copy a null value if it can serialize it",
+			serializer.getClass().getName());
+		return true;
+	}
+
+	private boolean padNullValue() {
+		return padding.length > 0;
+	}
+
+	/**
+	 * This method wraps the {@code originalSerializer} with the {@code NullableSerializer} if not already wrapped.
+	 *
+	 * @param originalSerializer serializer to wrap and add {@code null} support
+	 * @param padNullValueIfFixedLen pad null value to preserve the fixed length of original serializer
+	 * @return wrapped serializer which supports {@code null} values
+	 */
+	public static <T> TypeSerializer<T> wrap(
+		@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
+		return originalSerializer instanceof NullableSerializer ?
+			originalSerializer : new NullableSerializer<>(originalSerializer, padNullValueIfFixedLen);
+	}
+
+	@Override
+	public boolean isImmutableType() {
+		return originalSerializer.isImmutableType();
+	}
+
+	@Override
+	public TypeSerializer<T> duplicate() {
+		TypeSerializer<T> duplicateOriginalSerializer = originalSerializer.duplicate();
+		return duplicateOriginalSerializer == originalSerializer ?
+			this : new NullableSerializer<>(duplicateOriginalSerializer, padNullValue());
+	}
+
+	@Override
+	public T createInstance() {
+		return originalSerializer.createInstance();
+	}
+
+	@Override
+	public T copy(T from) {
+		return from == null ? null : originalSerializer.copy(from);
+	}
+
+	@Override
+	public T copy(T from, T reuse) {
+		return from == null ? null :
+			(reuse == null ? originalSerializer.copy(from) : originalSerializer.copy(from, reuse));
+	}
+
+	@Override
+	public int getLength() {
+		return padNullValue() ? 1 + padding.length : -1;
+	}
+
+	@Override
+	public void serialize(T record, DataOutputView target) throws IOException {
+		if (record == null) {
+			target.writeBoolean(true);
+			target.write(padding);
+		} else {
+			target.writeBoolean(false);
+			originalSerializer.serialize(record, target);
+		}
+	}
+
+	@Override
+	public T deserialize(DataInputView source) throws IOException {
+		boolean isNull = deserializeNull(source);
+		return isNull ? null : originalSerializer.deserialize(source);
+	}
+
+	@Override
+	public T deserialize(T reuse, DataInputView source) throws IOException {
+		boolean isNull = deserializeNull(source);
+		return isNull ? null : (reuse == null ?
+			originalSerializer.deserialize(source) : originalSerializer.deserialize(reuse, source));
+	}
+
+	private boolean deserializeNull(DataInputView source) throws IOException {
+		boolean isNull = source.readBoolean();
+		if (isNull) {
+			source.skipBytesToRead(padding.length);
+		}
+		return isNull;
+	}
+
+	@Override
+	public void copy(DataInputView source, DataOutputView target) throws IOException {
+		boolean isNull = source.readBoolean();
+		target.writeBoolean(isNull);
+		if (isNull) {
+			target.write(padding);
+		} else {
+			originalSerializer.copy(source, target);
+		}
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		return obj == this ||
+			(obj != null && obj.getClass() == getClass() &&
+				originalSerializer.equals(((NullableSerializer) obj).originalSerializer));
+	}
+
+	@Override
+	public boolean canEqual(Object obj) {
+		return (obj != null && obj.getClass() == getClass() &&
+			originalSerializer.canEqual(((NullableSerializer) obj).originalSerializer));
+	}
+
+	@Override
+	public int hashCode() {
+		return originalSerializer.hashCode();
+	}
+
+	@Override
+	public NullableSerializerConfigSnapshot<T> snapshotConfiguration() {
+		return new NullableSerializerConfigSnapshot<>(originalSerializer);
+	}
+
+	@Override
+	public CompatibilityResult<T> ensureCompatibility(TypeSerializerConfigSnapshot configSnapshot) {
+		if (configSnapshot instanceof NullableSerializerConfigSnapshot) {
+			List<Tuple2<TypeSerializer<?>, TypeSerializerConfigSnapshot>> previousKvSerializersAndConfigs =
+				((NullableSerializerConfigSnapshot) configSnapshot).getNestedSerializersAndConfigs();
+
+			CompatibilityResult<T> compatResult = CompatibilityUtil.resolveCompatibilityResult(
+				previousKvSerializersAndConfigs.get(0).f0,
+				UnloadableDummyTypeSerializer.class,
+				previousKvSerializersAndConfigs.get(0).f1,
+				originalSerializer);
+
+			if (!compatResult.isRequiresMigration()) {
+				return CompatibilityResult.compatible();
+			} else if (compatResult.getConvertDeserializer() != null) {
+				return CompatibilityResult.requiresMigration(
+					new NullableSerializer<>(
+						new TypeDeserializerAdapter<>(compatResult.getConvertDeserializer()), padNullValue()));
+			}
+		}
+
+		return CompatibilityResult.requiresMigration();
+	}
+
+	/**
+	 * Configuration snapshot for serializers of nullable types, containing the
+	 * configuration snapshot of its original serializer.
+	 */
+	@Internal
+	public static class NullableSerializerConfigSnapshot<T> extends CompositeTypeSerializerConfigSnapshot {
+		private static final int VERSION = 1;
+
+		/** This empty nullary constructor is required for deserializing the configuration. */
+		@SuppressWarnings("unused")
+		public NullableSerializerConfigSnapshot() {}
+
+		NullableSerializerConfigSnapshot(TypeSerializer<T> originalSerializer) {
+			super(originalSerializer);
+		}
+
+		@Override
+		public int getVersion() {
+			return VERSION;
+		}
+	}
+}
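
   Usage sketch for the new NullableSerializer. It assumes the existing flink-core classes
   LongSerializer, DataOutputSerializer and DataInputDeserializer, and ignores the checked
   IOExceptions for brevity. LongSerializer rejects null records, so wrapIfNullIsNotSupported
   wraps it, and with padding enabled the wrapped serializer stays fixed-length (1 flag byte
   plus 8 padding bytes):

       TypeSerializer<Long> nullable =
           NullableSerializer.wrapIfNullIsNotSupported(LongSerializer.INSTANCE, true);

       DataOutputSerializer out = new DataOutputSerializer(32);
       nullable.serialize(null, out);  // writes the "is null" flag plus padding
       nullable.serialize(42L, out);   // writes the flag plus the original 8-byte encoding

       DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
       Long first = nullable.deserialize(in);   // null
       Long second = nullable.deserialize(in);  // 42
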
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/AkkaOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/AkkaOptions.java
index 360ba860f6b..02234b9d259 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/AkkaOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/AkkaOptions.java
@@ -19,6 +19,9 @@
 package org.apache.flink.configuration;
 
 import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.description.Description;
+
+import static org.apache.flink.configuration.description.LinkElement.link;
 
 /**
  * Akka configuration options.
@@ -42,10 +45,12 @@
 	public static final ConfigOption<String> WATCH_HEARTBEAT_INTERVAL = ConfigOptions
 		.key("akka.watch.heartbeat.interval")
 		.defaultValue(ASK_TIMEOUT.defaultValue())
-		.withDescription("Heartbeat interval for Akka’s DeathWatch mechanism to detect dead TaskManagers. If" +
-			" TaskManagers are wrongly marked dead because of lost or delayed heartbeat messages, then you should" +
-			" decrease this value or increase akka.watch.heartbeat.pause. A thorough description of Akka’s DeathWatch" +
-			" can be found <a href=\"http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector\">here</a>.");
+		.withDescription(Description.builder()
+			.text("Heartbeat interval for Akka’s DeathWatch mechanism to detect dead TaskManagers. If" +
+					" TaskManagers are wrongly marked dead because of lost or delayed heartbeat messages, then you" +
+					" should decrease this value or increase akka.watch.heartbeat.pause. A thorough description of" +
+					" Akka’s DeathWatch can be found %s",
+				link("http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector", "here")).build());
 
 	/**
 	 * The maximum acceptable Akka death watch heartbeat pause.
@@ -53,11 +58,14 @@
 	public static final ConfigOption<String> WATCH_HEARTBEAT_PAUSE = ConfigOptions
 		.key("akka.watch.heartbeat.pause")
 		.defaultValue("60 s")
-		.withDescription("Acceptable heartbeat pause for Akka’s DeathWatch mechanism. A low value does not allow an" +
-			" irregular heartbeat. If TaskManagers are wrongly marked dead because of lost or delayed heartbeat messages," +
-			" then you should increase this value or decrease akka.watch.heartbeat.interval. Higher value increases the" +
-			" time to detect a dead TaskManager. A thorough description of Akka’s DeathWatch can be found" +
-			" <a href=\"http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector\">here</a>.");
+		.withDescription(Description.builder()
+			.text("Acceptable heartbeat pause for Akka’s DeathWatch mechanism. A low value does not allow an" +
+					" irregular heartbeat. If TaskManagers are wrongly marked dead because of lost or delayed" +
+					" heartbeat messages, then you should increase this value or decrease akka.watch.heartbeat.interval." +
+					" Higher value increases the time to detect a dead TaskManager. A thorough description of Akka’s" +
+					" DeathWatch can be found %s",
+				link("http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector", "here")
+			).build());
 	/**
 	 * The Akka tcp connection timeout.
 	 */
@@ -112,9 +120,11 @@
 	public static final ConfigOption<Integer> WATCH_THRESHOLD = ConfigOptions
 		.key("akka.watch.threshold")
 		.defaultValue(12)
-		.withDescription("Threshold for the DeathWatch failure detector. A low value is prone to false positives whereas" +
-			" a high value increases the time to detect a dead TaskManager. A thorough description of Akka’s DeathWatch" +
-			" can be found <a href=\"http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector\">here</a>.");
+		.withDescription(Description.builder()
+			.text("Threshold for the DeathWatch failure detector. A low value is prone to false positives whereas" +
+					" a high value increases the time to detect a dead TaskManager. A thorough description of Akka’s" +
+					" DeathWatch can be found %s",
+				link("http://doc.akka.io/docs/akka/snapshot/scala/remoting.html#failure-detector", "here")).build());
 
 	/**
 	 * Override SSL support for the Akka transport.
@@ -184,4 +194,81 @@
 		.key("akka.retry-gate-closed-for")
 		.defaultValue(50L)
 		.withDescription("Milliseconds a gate should be closed for after a remote connection was disconnected.");
+
+	// ==================================================
+	// Configurations for fork-join-executor.
+	// ==================================================
+
+	public static final ConfigOption<Double> FORK_JOIN_EXECUTOR_PARALLELISM_FACTOR = ConfigOptions
+		.key("akka.fork-join-executor.parallelism-factor")
+		.defaultValue(2.0)
+		.withDescription(Description.builder()
+			.text("The parallelism factor is used to determine thread pool size using the" +
+				" following formula: ceil(available processors * factor). Resulting size" +
+				" is then bounded by the parallelism-min and parallelism-max values."
+			).build());
+
+	public static final ConfigOption<Integer> FORK_JOIN_EXECUTOR_PARALLELISM_MIN = ConfigOptions
+		.key("akka.fork-join-executor.parallelism-min")
+		.defaultValue(8)
+		.withDescription(Description.builder()
+			.text("Min number of threads to cap factor-based parallelism number to.").build());
+
+	public static final ConfigOption<Integer> FORK_JOIN_EXECUTOR_PARALLELISM_MAX = ConfigOptions
+		.key("akka.fork-join-executor.parallelism-max")
+		.defaultValue(64)
+		.withDescription(Description.builder()
+			.text("Max number of threads to cap factor-based parallelism number to.").build());
+
+	// ==================================================
+	// Configurations for client-socket-work-pool.
+	// ==================================================
+
+	public static final ConfigOption<Integer> CLIENT_SOCKET_WORKER_POOL_SIZE_MIN = ConfigOptions
+		.key("akka.client-socket-worker-pool.pool-size-min")
+		.defaultValue(1)
+		.withDescription(Description.builder()
+			.text("Min number of threads to cap factor-based number to.").build());
+
+	public static final ConfigOption<Integer> CLIENT_SOCKET_WORKER_POOL_SIZE_MAX = ConfigOptions
+		.key("akka.client-socket-worker-pool.pool-size-max")
+		.defaultValue(2)
+		.withDescription(Description.builder()
+			.text("Max number of threads to cap factor-based number to.").build());
+
+	public static final ConfigOption<Double> CLIENT_SOCKET_WORKER_POOL_SIZE_FACTOR = ConfigOptions
+		.key("akka.client-socket-worker-pool.pool-size-factor")
+		.defaultValue(1.0)
+		.withDescription(Description.builder()
+			.text("The pool size factor is used to determine thread pool size" +
+				" using the following formula: ceil(available processors * factor)." +
+				" Resulting size is then bounded by the pool-size-min and" +
+				" pool-size-max values."
+			).build());
+
+	// ==================================================
+	// Configurations for server-socket-work-pool.
+	// ==================================================
+
+	public static final ConfigOption<Integer> SERVER_SOCKET_WORKER_POOL_SIZE_MIN = ConfigOptions
+		.key("akka.server-socket-worker-pool.pool-size-min")
+		.defaultValue(1)
+		.withDescription(Description.builder()
+			.text("Min number of threads to cap factor-based number to.").build());
+
+	public static final ConfigOption<Integer> SERVER_SOCKET_WORKER_POOL_SIZE_MAX = ConfigOptions
+		.key("akka.server-socket-worker-pool.pool-size-max")
+		.defaultValue(2)
+		.withDescription(Description.builder()
+			.text("Max number of threads to cap factor-based number to.").build());
+
+	public static final ConfigOption<Double> SERVER_SOCKET_WORKER_POOL_SIZE_FACTOR = ConfigOptions
+		.key("akka.server-socket-worker-pool.pool-size-factor")
+		.defaultValue(1.0)
+		.withDescription(Description.builder()
+			.text("The pool size factor is used to determine thread pool size" +
+				" using the following formula: ceil(available processors * factor)." +
+				" Resulting size is then bounded by the pool-size-min and" +
+				" pool-size-max values."
+			).build());
 }
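
   All of the new executor and worker-pool options above share the sizing formula mentioned in
   their descriptions. Purely as an illustration of that formula (this is not Flink or Akka code;
   the values are the defaults defined above):

       int availableProcessors = Runtime.getRuntime().availableProcessors();
       double factor = 2.0;  // akka.fork-join-executor.parallelism-factor
       int min = 8;          // akka.fork-join-executor.parallelism-min
       int max = 64;         // akka.fork-join-executor.parallelism-max

       // pool size = ceil(available processors * factor), bounded by min and max
       int poolSize = Math.min(max, Math.max(min, (int) Math.ceil(availableProcessors * factor)));
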
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java b/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java
index 8e7d79b2301..be242f54e25 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/ConfigOption.java
@@ -19,6 +19,7 @@
 package org.apache.flink.configuration;
 
 import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.description.Description;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -52,7 +53,7 @@
 	private final T defaultValue;
 
 	/** The description for this option. */
-	private final String description;
+	private final Description description;
 
 	// ------------------------------------------------------------------------
 
@@ -64,7 +65,7 @@
 	 */
 	ConfigOption(String key, T defaultValue) {
 		this.key = checkNotNull(key);
-		this.description = "";
+		this.description = Description.builder().text("").build();
 		this.defaultValue = defaultValue;
 		this.deprecatedKeys = EMPTY;
 	}
@@ -73,10 +74,28 @@
 	 * Creates a new config option with deprecated keys.
 	 *
 	 * @param key             The current key for that config option
+	 * @param description     Description for that option
 	 * @param defaultValue    The default value for this option
 	 * @param deprecatedKeys  The list of deprecated keys, in the order to be checked
+	 * @deprecated use version with {@link Description} instead
 	 */
+	@Deprecated
 	ConfigOption(String key, String description, T defaultValue, String... deprecatedKeys) {
+		this.key = checkNotNull(key);
+		this.description = Description.builder().text(description).build();
+		this.defaultValue = defaultValue;
+		this.deprecatedKeys = deprecatedKeys == null || deprecatedKeys.length == 0 ? EMPTY : deprecatedKeys;
+	}
+
+	/**
+	 * Creates a new config option with deprecated keys.
+	 *
+	 * @param key             The current key for that config option
+	 * @param description     Description for that option
+	 * @param defaultValue    The default value for this option
+	 * @param deprecatedKeys  The list of deprecated keys, in the order to be checked
+	 */
+	ConfigOption(String key, Description description, T defaultValue, String... deprecatedKeys) {
 		this.key = checkNotNull(key);
 		this.description = description;
 		this.defaultValue = defaultValue;
@@ -104,15 +123,26 @@
 	 * Creates a new config option, using this option's key and default value, and
 	 * adding the given description. The given description is used when generation the configuration documention.
 	 *
-	 * <p><b>NOTE:</b> You can use html to format the output of the generated cell.
-	 *
 	 * @param description The description for this option.
 	 * @return A new config option, with given description.
+	 * @deprecated use version with {@link Description}
 	 */
+	@Deprecated
 	public ConfigOption<T> withDescription(final String description) {
 		return new ConfigOption<>(key, description, defaultValue, deprecatedKeys);
 	}
 
+	/**
+	 * Creates a new config option, using this option's key and default value, and
+	 * adding the given description. The given description is used when generating the configuration documentation.
+	 *
+	 * @param description The description for this option.
+	 * @return A new config option, with given description.
+	 */
+	public ConfigOption<T> withDescription(final Description description) {
+		return new ConfigOption<>(key, description, defaultValue, deprecatedKeys);
+	}
+
 	// ------------------------------------------------------------------------
 
 	/**
@@ -159,7 +189,7 @@ public boolean hasDeprecatedKeys() {
 	 * Returns the description of this option.
 	 * @return The option's description.
 	 */
-	public String description() {
+	public Description description() {
 		return description;
 	}
 
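
   The new withDescription(Description) overload is what the option definitions elsewhere in this
   patch (e.g. MetricOptions, JobManagerOptions) build on. A sketch of defining an option with the
   rich description builder; the key, default value and texts are made up for illustration, and
   the static import of TextElement.text matches the one added to JobManagerOptions:

       public static final ConfigOption<String> EXAMPLE_OPTION = ConfigOptions
           .key("example.option")
           .defaultValue("full")
           .withDescription(Description.builder()
               .text("Explains what the option does. Accepted values are:")
               .list(
                   text("'full': first choice."),
                   text("'region': second choice."))
               .build());
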
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/Configuration.java b/flink-core/src/main/java/org/apache/flink/configuration/Configuration.java
index 7d99fbb7696..00c4c38e820 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/Configuration.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/Configuration.java
@@ -729,6 +729,32 @@ else if (configOption.hasDeprecatedKeys()) {
 		}
 	}
 
+	/**
+	 * Removes given config option from the configuration.
+	 *
+	 * @param configOption config option to remove
+	 * @param <T> Type of the config option
+	 * @return true if the config option was removed, false otherwise
+	 */
+	public <T> boolean removeConfig(ConfigOption<T> configOption) {
+		synchronized (this.confData) {
+			// try the current key
+			Object oldValue = this.confData.remove(configOption.key());
+			if (oldValue == null) {
+				for (String deprecatedKey : configOption.deprecatedKeys()) {
+					oldValue = this.confData.remove(deprecatedKey);
+					if (oldValue != null) {
+						LOG.warn("Config uses deprecated configuration key '{}' instead of proper key '{}'",
+							deprecatedKey, configOption.key());
+						return true;
+					}
+				}
+				return false;
+			}
+			return true;
+		}
+	}
+
 
 	// --------------------------------------------------------------------------------------------
 
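
   Usage sketch for the new Configuration#removeConfig. The option used here is the
   FLINK_HADOOP_CONF_DIR option added to CoreOptions later in this patch; the path is illustrative:

       Configuration conf = new Configuration();
       conf.setString(CoreOptions.FLINK_HADOOP_CONF_DIR, "/etc/hadoop/conf");

       boolean removed = conf.removeConfig(CoreOptions.FLINK_HADOOP_CONF_DIR);      // true
       boolean removedAgain = conf.removeConfig(CoreOptions.FLINK_HADOOP_CONF_DIR); // false, nothing left
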
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/ConfigurationUtils.java b/flink-core/src/main/java/org/apache/flink/configuration/ConfigurationUtils.java
index 3d1d8300d87..1b308217770 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/ConfigurationUtils.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/ConfigurationUtils.java
@@ -31,6 +31,44 @@
 
 	private static final String[] EMPTY = new String[0];
 
+	/**
+	 * Get job manager's heap memory. This method will check the new key
+	 * {@link JobManagerOptions#JOB_MANAGER_HEAP_MEMORY} and
+	 * the old key {@link JobManagerOptions#JOB_MANAGER_HEAP_MEMORY_MB} for backwards compatibility.
+	 *
+	 * @param configuration the configuration object
+	 * @return the memory size of job manager's heap memory.
+	 */
+	public static MemorySize getJobManagerHeapMemory(Configuration configuration) {
+		if (configuration.containsKey(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY.key())) {
+			return MemorySize.parse(configuration.getString(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY));
+		} else if (configuration.containsKey(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY_MB.key())) {
+			return MemorySize.parse(configuration.getInteger(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY_MB) + "m");
+		} else {
+			//use default value
+			return MemorySize.parse(configuration.getString(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY));
+		}
+	}
+
+	/**
+	 * Get task manager's heap memory. This method will check the new key
+	 * {@link TaskManagerOptions#TASK_MANAGER_HEAP_MEMORY} and
+	 * the old key {@link TaskManagerOptions#TASK_MANAGER_HEAP_MEMORY_MB} for backwards compatibility.
+	 *
+	 * @param configuration the configuration object
+	 * @return the memory size of task manager's heap memory.
+	 */
+	public static MemorySize getTaskManagerHeapMemory(Configuration configuration) {
+		if (configuration.containsKey(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY.key())) {
+			return MemorySize.parse(configuration.getString(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY));
+		} else if (configuration.containsKey(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY_MB.key())) {
+			return MemorySize.parse(configuration.getInteger(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY_MB) + "m");
+		} else {
+			//use default value
+			return MemorySize.parse(configuration.getString(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY));
+		}
+	}
+
 	/**
 	 * Extracts the task manager directories for temporary files as defined by
 	 * {@link org.apache.flink.configuration.CoreOptions#TMP_DIRS}.
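
   Usage sketch for the new backwards-compatible heap-memory getters, assuming an old-style
   configuration that only sets the legacy TASK_MANAGER_HEAP_MEMORY_MB option referenced above:

       Configuration conf = new Configuration();
       conf.setInteger(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY_MB, 2048);

       // Falls back to the legacy key because the new TASK_MANAGER_HEAP_MEMORY key is not set.
       MemorySize heap = ConfigurationUtils.getTaskManagerHeapMemory(conf);  // 2048 megabytes
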
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java
index 656943f59bb..f4102042d2a 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java
@@ -173,6 +173,28 @@
 			" TaskManager, and Zookeeper services (start-cluster.sh, stop-cluster.sh, start-zookeeper-quorum.sh," +
 			" stop-zookeeper-quorum.sh).");
 
+	/**
+	 * This option is here only for documentation generation; it is only
+	 * evaluated in the shell scripts.
+	 */
+	@SuppressWarnings("unused")
+	public static final ConfigOption<String> FLINK_HADOOP_CONF_DIR = ConfigOptions
+		.key("env.hadoop.conf.dir")
+		.noDefaultValue()
+		.withDescription("Path to the Hadoop configuration directory. It is required to read HDFS and/or YARN" +
+			" configuration. You can also set it via the environment variable.");
+
+	/**
+	 * This option is here only for documentation generation; it is only
+	 * evaluated in the shell scripts.
+	 */
+	@SuppressWarnings("unused")
+	public static final ConfigOption<String> FLINK_YARN_CONF_DIR = ConfigOptions
+		.key("env.yarn.conf.dir")
+		.noDefaultValue()
+		.withDescription("Path to the YARN configuration directory. It is required to run Flink on YARN. You can also" +
+			" set it via the environment variable.");
+
 	// ------------------------------------------------------------------------
 	//  generic io
 	// ------------------------------------------------------------------------
@@ -181,7 +203,7 @@
 	 * The config parameter defining the directories for temporary files, separated by
 	 * ",", "|", or the system's {@link java.io.File#pathSeparator}.
 	 */
-	@Documentation.OverrideDefault("System.getProperty(\"java.io.tmpdir\")")
+	@Documentation.OverrideDefault("'LOCAL_DIRS' on Yarn. '_FLINK_TMP_DIR' on Mesos. System.getProperty(\"java.io.tmpdir\") in standalone.")
 	public static final ConfigOption<String> TMP_DIRS =
 		key("io.tmp.dirs")
 			.defaultValue(System.getProperty("java.io.tmpdir"))
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/DelegatingConfiguration.java b/flink-core/src/main/java/org/apache/flink/configuration/DelegatingConfiguration.java
index 7b75c7a6999..1a637f65626 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/DelegatingConfiguration.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/DelegatingConfiguration.java
@@ -310,6 +310,11 @@ public Configuration clone() {
 		return prefixed;
 	}
 
+	@Override
+	public <T> boolean removeConfig(ConfigOption<T> configOption) {
+		return backingConfig.removeConfig(configOption);
+	}
+
 	@Override
 	public boolean containsKey(String key) {
 		return backingConfig.containsKey(prefix + key);
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/HighAvailabilityOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/HighAvailabilityOptions.java
index c8b8ae982a9..787efffa3ed 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/HighAvailabilityOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/HighAvailabilityOptions.java
@@ -22,6 +22,7 @@
 import org.apache.flink.annotation.docs.ConfigGroup;
 import org.apache.flink.annotation.docs.ConfigGroups;
 import org.apache.flink.annotation.docs.Documentation;
+import org.apache.flink.configuration.description.Description;
 
 import static org.apache.flink.configuration.ConfigOptions.key;
 
@@ -157,7 +158,9 @@
 			key("high-availability.zookeeper.path.mesos-workers")
 			.defaultValue("/mesos-workers")
 			.withDeprecatedKeys("recovery.zookeeper.path.mesos-workers")
-			.withDescription("ZooKeeper root path (ZNode) for Mesos workers.");
+			.withDescription(Description.builder()
+				.text("The ZooKeeper root path for persisting the Mesos worker information.")
+				.build());
 
 	// ------------------------------------------------------------------------
 	//  ZooKeeper Client Settings
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/JobManagerOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/JobManagerOptions.java
index f78ed9d367d..e845d607b6f 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/JobManagerOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/JobManagerOptions.java
@@ -20,8 +20,10 @@
 
 import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.annotation.docs.Documentation;
+import org.apache.flink.configuration.description.Description;
 
 import static org.apache.flink.configuration.ConfigOptions.key;
+import static org.apache.flink.configuration.description.TextElement.text;
 
 /**
  * Configuration options for the JobManager.
@@ -107,7 +109,14 @@
 	public static final ConfigOption<String> EXECUTION_FAILOVER_STRATEGY =
 		key("jobmanager.execution.failover-strategy")
 			.defaultValue("full")
-			.withDescription("The maximum number of prior execution attempts kept in history.");
+			.withDescription(Description.builder()
+				.text("This option specifies how the job computation recovers from task failures. " +
+					"Accepted values are:")
+				.list(
+					text("'full': Restarts all tasks."),
+					text("'individual': Restarts only the failed task. Should only be used if all tasks are independent components."),
+					text("'region': Restarts all tasks that could be affected by the task failure.")
+				).build());
 
 	/**
 	 * This option specifies the interval in order to trigger a resource manager reconnection if the connection
@@ -145,11 +154,17 @@
 		.defaultValue(60L * 60L)
 		.withDescription("The time in seconds after which a completed job expires and is purged from the job store.");
 
+	/**
+	 * The timeout in milliseconds for requesting a slot from the Slot Pool.
+	 */
 	public static final ConfigOption<Long> SLOT_REQUEST_TIMEOUT =
 		key("slot.request.timeout")
 		.defaultValue(5L * 60L * 1000L)
 		.withDescription("The timeout in milliseconds for requesting a slot from Slot Pool.");
 
+	/**
+	 * The timeout in milliseconds for an idle slot in the Slot Pool.
+	 */
 	public static final ConfigOption<Long> SLOT_IDLE_TIMEOUT =
 		key("slot.idle.timeout")
 			// default matches heartbeat.timeout so that sticky allocation is not lost on timeouts for local recovery
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/MetricOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/MetricOptions.java
index 3b11645fae8..fc6b3c14c46 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/MetricOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/MetricOptions.java
@@ -19,8 +19,10 @@
 package org.apache.flink.configuration;
 
 import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.description.Description;
 
 import static org.apache.flink.configuration.ConfigOptions.key;
+import static org.apache.flink.configuration.description.TextElement.text;
 
 /**
  * Configuration options for metrics and metric reporters.
@@ -104,6 +106,24 @@
 			.defaultValue("<host>.taskmanager.<tm_id>.<job_name>.<operator_name>.<subtask_index>")
 			.withDescription("Defines the scope format string that is applied to all metrics scoped to an operator.");
 
+	public static final ConfigOption<Long> LATENCY_INTERVAL =
+		key("metrics.latency.interval")
+			.defaultValue(2000L)
+			.withDescription("Defines the interval at which latency tracking marks are emitted from the sources." +
+				" Disables latency tracking if set to 0 or a negative value. Enabling this feature can significantly" +
+				" impact the performance of the cluster.");
+
+	public static final ConfigOption<String> LATENCY_SOURCE_GRANULARITY =
+		key("metrics.latency.granularity")
+			.defaultValue("subtask")
+			.withDescription(Description.builder()
+				.text("Defines the granularity of latency metrics. Accepted values are:")
+				.list(
+					text("single - Track latency without differentiating between sources and subtasks."),
+					text("operator - Track latency while differentiating between sources, but not subtasks."),
+					text("subtask - Track latency while differentiating between sources and subtasks."))
+				.build());
+
 	/** The number of measured latencies to maintain at each operator. */
 	public static final ConfigOption<Integer> LATENCY_HISTORY_SIZE =
 		key("metrics.latency.history-size")
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/ResourceManagerOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/ResourceManagerOptions.java
index 4ce49813bc5..5a203e3372b 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/ResourceManagerOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/ResourceManagerOptions.java
@@ -19,16 +19,12 @@
 package org.apache.flink.configuration;
 
 import org.apache.flink.annotation.PublicEvolving;
-import org.apache.flink.annotation.docs.ConfigGroup;
-import org.apache.flink.annotation.docs.ConfigGroups;
+import org.apache.flink.configuration.description.Description;
 
 /**
  * The set of configuration options relating to the ResourceManager.
  */
 @PublicEvolving
-@ConfigGroups(groups = {
-	@ConfigGroup(name = "SlotManager", keyPrefix = "slotmanager")
-})
 public class ResourceManagerOptions {
 
 	/**
@@ -72,20 +68,35 @@
 
 	/**
 	 * The timeout for a slot request to be discarded, in milliseconds.
+	 * @deprecated Use {@link JobManagerOptions#SLOT_REQUEST_TIMEOUT}.
 	 */
+	@Deprecated
 	public static final ConfigOption<Long> SLOT_REQUEST_TIMEOUT = ConfigOptions
 		.key("slotmanager.request-timeout")
-		.defaultValue(600000L)
+		.defaultValue(-1L)
 		.withDescription("The timeout for a slot request to be discarded.");
 
 	/**
 	 * The timeout for an idle task manager to be released, in milliseconds.
+	 * @deprecated Use {@link #TASK_MANAGER_TIMEOUT}.
 	 */
-	public static final ConfigOption<Long> TASK_MANAGER_TIMEOUT = ConfigOptions
+	@Deprecated
+	public static final ConfigOption<Long> SLOT_MANAGER_TASK_MANAGER_TIMEOUT = ConfigOptions
 		.key("slotmanager.taskmanager-timeout")
 		.defaultValue(30000L)
 		.withDescription("The timeout for an idle task manager to be released.");
 
+	/**
+	 * The timeout for an idle task manager to be released, in milliseconds.
+	 */
+	public static final ConfigOption<Long> TASK_MANAGER_TIMEOUT = ConfigOptions
+		.key("resourcemanager.taskmanager-timeout")
+		.defaultValue(30000L)
+		.withDeprecatedKeys(SLOT_MANAGER_TASK_MANAGER_TIMEOUT.key())
+		.withDescription(Description.builder()
+			.text("The timeout for an idle task manager to be released.")
+			.build());
+
 	/**
 	 * Prefix for passing custom environment variables to Flink's master process.
 	 * For example for passing LD_LIBRARY_PATH as an env variable to the AppMaster, set:
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/RestOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/RestOptions.java
index 1b2c39e75bd..5c1b6d681fa 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/RestOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/RestOptions.java
@@ -34,6 +34,7 @@
 	public static final ConfigOption<String> BIND_ADDRESS =
 		key("rest.bind-address")
 			.noDefaultValue()
+			.withDeprecatedKeys(WebOptions.ADDRESS.key(), ConfigConstants.DEFAULT_JOB_MANAGER_WEB_FRONTEND_ADDRESS.key())
 			.withDescription("The address that the server binds itself.");
 
 	/**
@@ -92,6 +93,14 @@
 			.defaultValue(15_000L)
 			.withDescription("The maximum time in ms for the client to establish a TCP connection.");
 
+	/**
+	 * The maximum time in ms for a connection to stay idle before failing.
+	 */
+	public static final ConfigOption<Long> IDLENESS_TIMEOUT =
+		key("rest.idleness-timeout")
+			.defaultValue(5L * 60L * 1_000L) // 5 minutes
+			.withDescription("The maximum time in ms for a connection to stay idle before failing.");
+
 	/**
 	 * The maximum content length that the server will handle.
 	 */
@@ -108,4 +117,14 @@
 			.defaultValue(104_857_600)
 			.withDescription("The maximum content length in bytes that the client will handle.");
 
+	public static final ConfigOption<Integer> SERVER_NUM_THREADS =
+		key("rest.server.numThreads")
+			.defaultValue(4)
+			.withDescription("The number of threads for the asynchronous processing of requests.");
+
+	public static final ConfigOption<Integer> SERVER_THREAD_PRIORITY = key("rest.server.thread-priority")
+		.defaultValue(Thread.NORM_PRIORITY)
+		.withDescription("Thread priority of the REST server's executor for processing asynchronous requests. " +
+				"Lowering the thread priority will give Flink's main components more CPU time whereas " +
+				"increasing will allocate more time for the REST server's processing.");
 }
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java
index 03ee4f2db60..87ab1713215 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java
@@ -22,8 +22,10 @@
 import org.apache.flink.annotation.docs.ConfigGroup;
 import org.apache.flink.annotation.docs.ConfigGroups;
 import org.apache.flink.annotation.docs.Documentation;
+import org.apache.flink.configuration.description.Description;
 
 import static org.apache.flink.configuration.ConfigOptions.key;
+import static org.apache.flink.configuration.description.LinkElement.link;
 
 /**
  * The set of configuration options relating to security.
@@ -118,6 +120,14 @@
 			.defaultValue(false)
 			.withDescription("Turns on SSL for external communication via the REST endpoints.");
 
+	/**
+	 * Enables mutual SSL authentication for external REST endpoints.
+	 */
+	public static final ConfigOption<Boolean> SSL_REST_AUTHENTICATION_ENABLED =
+		key("security.ssl.rest.authentication-enabled")
+			.defaultValue(false)
+			.withDescription("Turns on mutual SSL authentication for external communication via the REST endpoints.");
+
 	// ----------------- certificates (internal + external) -------------------
 
 	/**
@@ -274,8 +284,12 @@
 	public static final ConfigOption<String> SSL_ALGORITHMS =
 		key("security.ssl.algorithms")
 			.defaultValue("TLS_RSA_WITH_AES_128_CBC_SHA")
-			.withDescription("The comma separated list of standard SSL algorithms to be supported. Read more" +
-				" <a href=\"http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites\">here</a>.");
+			.withDescription(Description.builder()
+				.text("The comma separated list of standard SSL algorithms to be supported. Read more %s",
+					link(
+						"http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites",
+						"here"))
+				.build());
 
 	/**
 	 * Flag to enable/disable hostname verification for the ssl connections.
@@ -284,4 +298,47 @@
 		key("security.ssl.verify-hostname")
 			.defaultValue(true)
 			.withDescription("Flag to enable peer’s hostname verification during ssl handshake.");
+
+	// ------------------------ ssl parameters --------------------------------
+
+	/**
+	 * SSL session cache size.
+	 */
+	public static final ConfigOption<Integer> SSL_INTERNAL_SESSION_CACHE_SIZE =
+		key("security.ssl.internal.session-cache-size")
+			.defaultValue(-1)
+			.withDescription("The size of the cache used for storing SSL session objects. "
+				+ "According to https://github.com/netty/netty/issues/832, you should always set "
+				+ "this to an appropriate number to not run into a bug with stalling IO threads "
+				+ "during garbage collection. (-1 = use system default).")
+		.withDeprecatedKeys("security.ssl.session-cache-size");
+
+	/**
+	 * SSL session timeout.
+	 */
+	public static final ConfigOption<Integer> SSL_INTERNAL_SESSION_TIMEOUT =
+		key("security.ssl.internal.session-timeout")
+			.defaultValue(-1)
+			.withDescription("The timeout (in ms) for the cached SSL session objects. (-1 = use system default)")
+			.withDeprecatedKeys("security.ssl.session-timeout");
+
+	/**
+	 * SSL session timeout during handshakes.
+	 */
+	public static final ConfigOption<Integer> SSL_INTERNAL_HANDSHAKE_TIMEOUT =
+		key("security.ssl.internal.handshake-timeout")
+			.defaultValue(-1)
+			.withDescription("The timeout (in ms) during SSL handshake. (-1 = use system default)")
+			.withDeprecatedKeys("security.ssl.handshake-timeout");
+
+	/**
+	 * SSL session timeout after flushing the <tt>close_notify</tt> message.
+	 */
+	public static final ConfigOption<Integer> SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT =
+		key("security.ssl.internal.close-notify-flush-timeout")
+			.defaultValue(-1)
+			.withDescription("The timeout (in ms) for flushing the `close_notify` that was triggered by closing a " +
+				"channel. If the `close_notify` was not flushed in the given timeout the channel will be closed " +
+				"forcibly. (-1 = use system default)")
+			.withDeprecatedKeys("security.ssl.close-notify-flush-timeout");
 }
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/UnmodifiableConfiguration.java b/flink-core/src/main/java/org/apache/flink/configuration/UnmodifiableConfiguration.java
index f92de1c05eb..0a1bcc42856 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/UnmodifiableConfiguration.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/UnmodifiableConfiguration.java
@@ -65,6 +65,12 @@ public final void addAll(Configuration other, String prefix) {
 		error();
 	}
 
+	@Override
+	public <T> boolean removeConfig(ConfigOption<T> configOption) {
+		error();
+		return false;
+	}
+
 	private void error(){
 		throw new UnsupportedOperationException("The configuration is unmodifiable; its contents cannot be changed.");
 	}
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/TableDescriptorValidator.scala b/flink-core/src/main/java/org/apache/flink/configuration/description/BlockElement.java
similarity index 77%
rename from flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/TableDescriptorValidator.scala
rename to flink-core/src/main/java/org/apache/flink/configuration/description/BlockElement.java
index e0fa6025811..c55b496bfb0 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/TableDescriptorValidator.scala
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/BlockElement.java
@@ -16,14 +16,11 @@
  * limitations under the License.
  */
 
-package org.apache.flink.table.descriptors
+package org.apache.flink.configuration.description;
 
 /**
-  * Validator for [[TableDescriptor]].
-  */
-class TableDescriptorValidator extends DescriptorValidator {
+ * Part of a description that represents a block, e.g. some text, a linebreak, or a list.
+ */
+public interface BlockElement extends DescriptionElement {
 
-  override def validate(properties: DescriptorProperties): Unit = {
-    // nothing to do
-  }
 }
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/Description.java b/flink-core/src/main/java/org/apache/flink/configuration/description/Description.java
new file mode 100644
index 00000000000..c00890d7573
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/Description.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Description for {@link org.apache.flink.configuration.ConfigOption}. Allows providing multiple rich formats.
+ */
+public class Description {
+
+	private final List<BlockElement> blocks;
+
+	public static DescriptionBuilder builder() {
+		return new DescriptionBuilder();
+	}
+
+	public List<BlockElement> getBlocks() {
+		return blocks;
+	}
+
+	/**
+	 * Builder for {@link Description}. Allows adding rich formatting like lists, links, linebreaks, etc.
+	 * For example:
+	 * <pre>{@code
+	 * Description description = Description.builder()
+	 * 	.text("This is some list: ")
+	 * 	.list(
+	 * 		text("this is first element of list"),
+	 * 		text("this is second element of list with a %s", link("https://link")))
+	 * 	.build();
+	 * }</pre>
+	 */
+	public static class DescriptionBuilder {
+
+		private final List<BlockElement> blocks = new ArrayList<>();
+
+		/**
+		 * Adds a block of text with placeholders ("%s") that will be replaced with the string representation of the
+		 * given {@link InlineElement}s. For example:
+		 *
+		 * <p>{@code text("This is a text with a link %s", link("https://somepage", "to here"))}
+		 *
+		 * @param format   text with placeholders for elements
+		 * @param elements elements to be put in the text
+		 * @return description with added block of text
+		 */
+		public DescriptionBuilder text(String format, InlineElement... elements) {
+			blocks.add(TextElement.text(format, elements));
+			return this;
+		}
+
+		/**
+		 * Creates a simple block of text.
+		 *
+		 * @param text a simple block of text
+		 * @return block of text
+		 */
+		public DescriptionBuilder text(String text) {
+			blocks.add(TextElement.text(text));
+			return this;
+		}
+
+		/**
+		 * Block of description add.
+		 *
+		 * @param block block of description to add
+		 * @return block of description
+		 */
+		public DescriptionBuilder add(BlockElement block) {
+			blocks.add(block);
+			return this;
+		}
+
+		/**
+		 * Creates a line break in the description.
+		 */
+		public DescriptionBuilder linebreak() {
+			blocks.add(LineBreakElement.linebreak());
+			return this;
+		}
+
+		/**
+		 * Adds a bulleted list to the description.
+		 */
+		public DescriptionBuilder list(InlineElement... elements) {
+			blocks.add(ListElement.list(elements));
+			return this;
+		}
+
+		/**
+		 * Creates description representation.
+		 */
+		public Description build() {
+			return new Description(blocks);
+		}
+
+	}
+
+	private Description(List<BlockElement> blocks) {
+		this.blocks = blocks;
+	}
+}
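
A minimal usage sketch for the builder introduced here (class name and URL are placeholders), mirroring the Javadoc example above:

    import org.apache.flink.configuration.description.Description;

    import static org.apache.flink.configuration.description.LinkElement.link;
    import static org.apache.flink.configuration.description.TextElement.text;

    public class DescriptionBuilderExample {
        public static void main(String[] args) {
            // a text block followed by a bulleted list, one entry containing a link
            Description description = Description.builder()
                .text("Accepted values are:")
                .list(
                    text("'full': restarts all tasks"),
                    text("'region': restarts affected tasks, see %s", link("https://example.org/docs", "the docs")))
                .build();
            System.out.println(description.getBlocks().size()); // 2 blocks: text + list
        }
    }
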
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/DescriptionElement.java b/flink-core/src/main/java/org/apache/flink/configuration/description/DescriptionElement.java
new file mode 100644
index 00000000000..7a889f448bc
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/DescriptionElement.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+/**
+ * Part of a {@link Description} that can be converted into String representation.
+ */
+interface DescriptionElement {
+	/**
+	 * Transforms itself into a String representation using the given formatter.
+	 *
+	 * @param formatter formatter to use.
+	 */
+	void format(Formatter formatter);
+}
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/Formatter.java b/flink-core/src/main/java/org/apache/flink/configuration/description/Formatter.java
new file mode 100644
index 00000000000..fdf7db67ea5
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/Formatter.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+import java.util.EnumSet;
+
+/**
+ * Allows providing multiple formatters for the description, e.g. an HTML formatter or a Markdown formatter.
+ */
+public abstract class Formatter {
+
+	private final StringBuilder state = new StringBuilder();
+
+	/**
+	 * Formats the description into a String using format specific tags.
+	 *
+	 * @param description description to be formatted
+	 * @return string representation of the description
+	 */
+	public String format(Description description) {
+		for (BlockElement blockElement : description.getBlocks()) {
+			blockElement.format(this);
+		}
+		return finalizeFormatting();
+	}
+
+	public void format(LinkElement element) {
+		formatLink(state, element.getLink(), element.getText());
+	}
+
+	public void format(TextElement element) {
+		String[] inlineElements = element.getElements().stream().map(el -> {
+				Formatter formatter = newInstance();
+				el.format(formatter);
+				return formatter.finalizeFormatting();
+			}
+		).toArray(String[]::new);
+		formatText(state, escapeFormatPlaceholder(element.getFormat()), inlineElements, element.getStyles());
+	}
+
+	public void format(LineBreakElement element) {
+		formatLineBreak(state);
+	}
+
+	public void format(ListElement element) {
+		String[] inlineElements = element.getEntries().stream().map(el -> {
+				Formatter formatter = newInstance();
+				el.format(formatter);
+				return formatter.finalizeFormatting();
+			}
+		).toArray(String[]::new);
+		formatList(state, inlineElements);
+	}
+
+	private String finalizeFormatting() {
+		String result = state.toString();
+		state.setLength(0);
+		return result.replaceAll("%%", "%");
+	}
+
+	protected abstract void formatLink(StringBuilder state, String link, String description);
+
+	protected abstract void formatLineBreak(StringBuilder state);
+
+	protected abstract void formatText(
+		StringBuilder state,
+		String format,
+		String[] elements,
+		EnumSet<TextElement.TextStyle> styles);
+
+	protected abstract void formatList(StringBuilder state, String[] entries);
+
+	protected abstract Formatter newInstance();
+
+	private static final String TEMPORARY_PLACEHOLDER = "randomPlaceholderForStringFormat";
+
+	private static String escapeFormatPlaceholder(String value) {
+		return value
+			.replaceAll("%s", TEMPORARY_PLACEHOLDER)
+			.replaceAll("%", "%%")
+			.replaceAll(TEMPORARY_PLACEHOLDER, "%s");
+	}
+
+}
+
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/HtmlFormatter.java b/flink-core/src/main/java/org/apache/flink/configuration/description/HtmlFormatter.java
new file mode 100644
index 00000000000..a47530315c5
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/HtmlFormatter.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+import java.util.EnumSet;
+
+/**
+ * Formatter that transforms {@link Description} into Html representation.
+ */
+public class HtmlFormatter extends Formatter {
+
+	@Override
+	protected void formatLink(StringBuilder state, String link, String description) {
+		state.append(String.format("<a href=\"%s\">%s</a>", link, description));
+	}
+
+	@Override
+	protected void formatLineBreak(StringBuilder state) {
+		state.append("<br/>");
+	}
+
+	@Override
+	protected void formatText(
+			StringBuilder state,
+			String format,
+			String[] elements,
+			EnumSet<TextElement.TextStyle> styles) {
+		String escapedFormat = escapeCharacters(format);
+
+		String prefix = "";
+		String suffix = "";
+		if (styles.contains(TextElement.TextStyle.CODE)) {
+			prefix = "<span markdown=\"span\">`";
+			suffix = "`</span>";
+		}
+		state.append(prefix);
+		state.append(String.format(escapedFormat, elements));
+		state.append(suffix);
+	}
+
+	@Override
+	protected void formatList(StringBuilder state, String[] entries) {
+		state.append("<ul>");
+		for (String entry : entries) {
+			state.append(String.format("<li>%s</li>", entry));
+		}
+		state.append("</ul>");
+	}
+
+	@Override
+	protected Formatter newInstance() {
+		return new HtmlFormatter();
+	}
+
+	private static String escapeCharacters(String value) {
+		return value
+			.replaceAll("<", "&lt;")
+			.replaceAll(">", "&gt;");
+	}
+
+}
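
A short sketch of rendering a description with the HTML formatter added above (the expected output is shown as a comment):

    import org.apache.flink.configuration.description.Description;
    import org.apache.flink.configuration.description.HtmlFormatter;

    import static org.apache.flink.configuration.description.TextElement.text;

    public class HtmlFormatterExample {
        public static void main(String[] args) {
            Description description = Description.builder()
                .text("Accepted values are:")
                .list(text("single"), text("operator"), text("subtask"))
                .build();

            // prints: Accepted values are:<ul><li>single</li><li>operator</li><li>subtask</li></ul>
            System.out.println(new HtmlFormatter().format(description));
        }
    }
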
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/InlineElement.java b/flink-core/src/main/java/org/apache/flink/configuration/description/InlineElement.java
new file mode 100644
index 00000000000..2a218758545
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/InlineElement.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+/**
+ * Part of a description that represents an element inside a block, e.g. a link.
+ */
+public interface InlineElement extends DescriptionElement {
+
+}
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/LineBreakElement.java b/flink-core/src/main/java/org/apache/flink/configuration/description/LineBreakElement.java
new file mode 100644
index 00000000000..4a3de9c3cb5
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/LineBreakElement.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+/**
+ * Represents a line break in the {@link Description}.
+ */
+public class LineBreakElement implements BlockElement {
+
+	/**
+	 * Creates a line break in the description.
+	 */
+	public static LineBreakElement linebreak() {
+		return new LineBreakElement();
+	}
+
+	private LineBreakElement() {
+	}
+
+	@Override
+	public void format(Formatter formatter) {
+		formatter.format(this);
+	}
+}
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/LinkElement.java b/flink-core/src/main/java/org/apache/flink/configuration/description/LinkElement.java
new file mode 100644
index 00000000000..778844eb559
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/LinkElement.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+/**
+ * Element that represents a link in the {@link Description}.
+ */
+public class LinkElement implements InlineElement {
+	private final String link;
+	private final String text;
+
+	/**
+	 * Creates a link with a given url and description.
+	 *
+	 * @param link address that this link should point to
+	 * @param text a description for that link to be used in the text
+	 * @return link representation
+	 */
+	public static LinkElement link(String link, String text) {
+		return new LinkElement(link, text);
+	}
+
+	/**
+	 * Creates a link with a given url. This url will be used as a description for that link.
+	 *
+	 * @param link address that this link should point to
+	 * @return link representation
+	 */
+	public static LinkElement link(String link) {
+		return new LinkElement(link, link);
+	}
+
+	public String getLink() {
+		return link;
+	}
+
+	public String getText() {
+		return text;
+	}
+
+	private LinkElement(String link, String text) {
+		this.link = link;
+		this.text = text;
+	}
+
+	@Override
+	public void format(Formatter formatter) {
+		formatter.format(this);
+	}
+}
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/ListElement.java b/flink-core/src/main/java/org/apache/flink/configuration/description/ListElement.java
new file mode 100644
index 00000000000..1dea3ab49ec
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/ListElement.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Represents a list in the {@link Description}.
+ */
+public class ListElement implements BlockElement {
+
+	private final List<InlineElement> entries;
+
+	/**
+	 * Creates a list with blocks of text. For example:
+	 * <pre>{@code
+	 * .list(
+	 * 	text("this is first element of list"),
+	 * 	text("this is second element of list with a %s", link("https://link"))
+	 * )
+	 * }</pre>
+	 *
+	 * @param elements the entries of this list
+	 * @return list representation
+	 */
+	public static ListElement list(InlineElement... elements) {
+		return new ListElement(Arrays.asList(elements));
+	}
+
+	public List<InlineElement> getEntries() {
+		return entries;
+	}
+
+	private ListElement(List<InlineElement> entries) {
+		this.entries = entries;
+	}
+
+	@Override
+	public void format(Formatter formatter) {
+		formatter.format(this);
+	}
+}
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/description/TextElement.java b/flink-core/src/main/java/org/apache/flink/configuration/description/TextElement.java
new file mode 100644
index 00000000000..80bdfcf6cc7
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/configuration/description/TextElement.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.configuration.description;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.List;
+
+/**
+ * Represents a text block in the {@link Description}.
+ */
+public class TextElement implements BlockElement, InlineElement {
+	private final String format;
+	private final List<InlineElement> elements;
+	private final EnumSet<TextStyle> textStyles = EnumSet.noneOf(TextStyle.class);
+
+	/**
+	 * Creates a block of text with placeholders ("%s") that will be replaced with the string representation of the
+	 * given {@link InlineElement}s. For example:
+	 *
+	 * <p>{@code text("This is a text with a link %s", link("https://somepage", "to here"))}
+	 *
+	 * @param format text with placeholders for elements
+	 * @param elements elements to be put in the text
+	 * @return block of text
+	 */
+	public static TextElement text(String format, InlineElement... elements) {
+		return new TextElement(format, Arrays.asList(elements));
+	}
+
+	/**
+	 * Creates a simple block of text.
+	 *
+	 * @param text a simple block of text
+	 * @return block of text
+	 */
+	public static TextElement text(String text) {
+		return new TextElement(text, Collections.emptyList());
+	}
+
+	/**
+	 * Creates a block of text formatted as code.
+	 *
+	 * @param text a block of text that will be formatted as code
+	 * @return block of text formatted as code
+	 */
+	public static TextElement code(String text) {
+		TextElement element = text(text);
+		element.textStyles.add(TextStyle.CODE);
+		return element;
+	}
+
+	public String getFormat() {
+		return format;
+	}
+
+	public List<InlineElement> getElements() {
+		return elements;
+	}
+
+	public EnumSet<TextStyle> getStyles() {
+		return textStyles;
+	}
+
+	private TextElement(String format, List<InlineElement> elements) {
+		this.format = format;
+		this.elements = elements;
+	}
+
+	@Override
+	public void format(Formatter formatter) {
+		formatter.format(this);
+	}
+
+	/**
+	 * Styles that can be applied to {@link TextElement} e.g. code, bold etc.
+	 */
+	public enum TextStyle {
+		CODE
+	}
+}
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/EntropyInjectingFileSystem.java b/flink-core/src/main/java/org/apache/flink/core/fs/EntropyInjectingFileSystem.java
new file mode 100644
index 00000000000..14a15bedafb
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/EntropyInjectingFileSystem.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.core.fs;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/**
+ * An interface to be implemented by a {@link FileSystem} that is aware of entropy injection.
+ *
+ * <p>Entropy injection is a technique to spread files/objects across more parallel shards of
+ * a distributed storage (typically object store) by adding random characters to the beginning
+ * of the path/key and hence spreading the keys across a wider domain of prefixes.
+ *
+ * <p>Entropy injection typically works by having a recognized marker string in paths
+ * and replacing that marker with random characters.
+ *
+ * <p>This interface is used in conjunction with the {@link EntropyInjector} (as a poor man's
+ * way to build a mix-in in Java).
+ */
+@PublicEvolving
+public interface EntropyInjectingFileSystem {
+
+	/**
+	 * Gets the marker string that represents the substring of a path to be replaced
+	 * by the entropy characters.
+	 */
+	String getEntropyInjectionKey();
+
+	/**
+	 * Creates a string with random entropy to be injected into a path.
+	 */
+	String generateEntropy();
+}
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/EntropyInjector.java b/flink-core/src/main/java/org/apache/flink/core/fs/EntropyInjector.java
new file mode 100644
index 00000000000..0fba138d71f
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/EntropyInjector.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.core.fs;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.core.fs.FileSystem.WriteMode;
+import org.apache.flink.util.FlinkRuntimeException;
+
+import javax.annotation.Nullable;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * This class offers utilities for entropy injection for FileSystems that implement
+ * {@link EntropyInjectingFileSystem}.
+ */
+@PublicEvolving
+public class EntropyInjector {
+
+	/**
+	 * Handles entropy injection across regular and entropy-aware file systems.
+	 *
+	 * <p>If the given file system is entropy-aware (i.e., implements {@link EntropyInjectingFileSystem}),
+	 * then this method replaces the entropy marker in the path with random characters.
+	 * The entropy marker is defined by {@link EntropyInjectingFileSystem#getEntropyInjectionKey()}.
+	 *
+	 * <p>If the given file system does not implement {@code EntropyInjectingFileSystem},
+	 * then this method delegates to {@link FileSystem#create(Path, WriteMode)} and
+	 * returns the same path in the resulting {@code OutputStreamAndPath}.
+	 */
+	public static OutputStreamAndPath createEntropyAware(
+			FileSystem fs,
+			Path path,
+			WriteMode writeMode) throws IOException {
+
+		// check and possibly inject entropy into the path
+		final EntropyInjectingFileSystem efs = getEntropyFs(fs);
+		final Path processedPath = efs == null ? path : resolveEntropy(path, efs, true);
+
+		// create the stream on the original file system to let the safety net
+		// take its effect
+		final FSDataOutputStream out = fs.create(processedPath, writeMode);
+		return new OutputStreamAndPath(out, processedPath);
+	}
+
+	/**
+	 * Removes the entropy marker string from the path, if the given file system is an
+	 * entropy-injecting file system (implements {@link EntropyInjectingFileSystem}) and
+	 * the entropy marker key is present. Otherwise, this returns the path as is.
+	 *
+	 * @param path The path to filter.
+	 * @return The path without the marker string.
+	 */
+	public static Path removeEntropyMarkerIfPresent(FileSystem fs, Path path) {
+		final EntropyInjectingFileSystem efs = getEntropyFs(fs);
+		if (efs == null) {
+			return path;
+		}
+		else  {
+			try {
+				return resolveEntropy(path, efs, false);
+			}
+			catch (IOException e) {
+				// this should never happen, because the path was valid before and we only remove characters.
+				// rethrow to silence the compiler
+				throw new FlinkRuntimeException(e.getMessage(), e);
+			}
+		}
+	}
+
+	// ------------------------------------------------------------------------
+
+	@Nullable
+	private static EntropyInjectingFileSystem getEntropyFs(FileSystem fs) {
+		if (fs instanceof EntropyInjectingFileSystem) {
+			return (EntropyInjectingFileSystem) fs;
+		}
+		else if (fs instanceof SafetyNetWrapperFileSystem) {
+			FileSystem delegate = ((SafetyNetWrapperFileSystem) fs).getWrappedDelegate();
+			if (delegate instanceof EntropyInjectingFileSystem) {
+				return (EntropyInjectingFileSystem) delegate;
+			}
+			else {
+				return null;
+			}
+		}
+		else {
+			return null;
+		}
+	}
+
+	@VisibleForTesting
+	static Path resolveEntropy(Path path, EntropyInjectingFileSystem efs, boolean injectEntropy) throws IOException {
+		final String entropyInjectionKey = efs.getEntropyInjectionKey();
+
+		if (entropyInjectionKey == null) {
+			return path;
+		}
+		else {
+			final URI originalUri = path.toUri();
+			final String checkpointPath = originalUri.getPath();
+
+			final int indexOfKey = checkpointPath.indexOf(entropyInjectionKey);
+			if (indexOfKey == -1) {
+				return path;
+			}
+			else {
+				final StringBuilder buffer = new StringBuilder(checkpointPath.length());
+				buffer.append(checkpointPath, 0, indexOfKey);
+
+				if (injectEntropy) {
+					buffer.append(efs.generateEntropy());
+				}
+
+				buffer.append(checkpointPath, indexOfKey + entropyInjectionKey.length(), checkpointPath.length());
+
+				final String rewrittenPath = buffer.toString();
+				try {
+					return new Path(new URI(
+							originalUri.getScheme(),
+							originalUri.getAuthority(),
+							rewrittenPath,
+							originalUri.getQuery(),
+							originalUri.getFragment()).normalize());
+				}
+				catch (URISyntaxException e) {
+					// this could only happen if the injected entropy string contains invalid characters
+					throw new IOException("URI format error while processing path for entropy injection", e);
+				}
+			}
+		}
+	}
+
+	// ------------------------------------------------------------------------
+
+	/** This class is not meant to be instantiated. */
+	private EntropyInjector() {}
+}
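
To make the contract concrete, a hypothetical entropy provider could look like the sketch below (class name invented for illustration; a real implementation would be a FileSystem subclass that also implements EntropyInjectingFileSystem, as the Javadoc above explains):

    import org.apache.flink.core.fs.EntropyInjectingFileSystem;
    import org.apache.flink.util.StringUtils;

    import java.util.Random;

    public class ExampleEntropyProvider implements EntropyInjectingFileSystem {

        private final Random random = new Random();

        @Override
        public String getEntropyInjectionKey() {
            // the marker that gets replaced in paths, e.g. s3://bucket/_entropy_/chk-42
            return "_entropy_";
        }

        @Override
        public String generateEntropy() {
            // with the marker above, s3://bucket/_entropy_/chk-42 could become s3://bucket/x7Ab/chk-42
            return StringUtils.generateRandomAlphanumericString(random, 4);
        }
    }
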
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/OutputStreamAndPath.java b/flink-core/src/main/java/org/apache/flink/core/fs/OutputStreamAndPath.java
new file mode 100644
index 00000000000..62c9479e91b
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/OutputStreamAndPath.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.core.fs;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * An output stream and a path.
+ */
+public final class OutputStreamAndPath {
+
+	private final FSDataOutputStream stream;
+
+	private final Path path;
+
+	/**
+	 * Creates an OutputStreamAndPath.
+	 */
+	public OutputStreamAndPath(FSDataOutputStream stream, Path path) {
+		this.stream = checkNotNull(stream);
+		this.path = checkNotNull(path);
+	}
+
+	public FSDataOutputStream stream() {
+		return stream;
+	}
+
+	public Path path() {
+		return path;
+	}
+}
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/SafetyNetWrapperFileSystem.java b/flink-core/src/main/java/org/apache/flink/core/fs/SafetyNetWrapperFileSystem.java
index 92b3a74c2cc..04e63155893 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/SafetyNetWrapperFileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/SafetyNetWrapperFileSystem.java
@@ -64,6 +64,11 @@ public FileStatus getFileStatus(Path f) throws IOException {
 		return unsafeFileSystem.getFileStatus(f);
 	}
 
+	@Override
+	public RecoverableWriter createRecoverableWriter() throws IOException {
+		return unsafeFileSystem.createRecoverableWriter();
+	}
+
 	@Override
 	public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
 		return unsafeFileSystem.getFileBlockLocations(file, start, len);
diff --git a/flink-core/src/main/java/org/apache/flink/core/io/SimpleVersionedSerialization.java b/flink-core/src/main/java/org/apache/flink/core/io/SimpleVersionedSerialization.java
index 8bead11caeb..2c5b68c592c 100644
--- a/flink-core/src/main/java/org/apache/flink/core/io/SimpleVersionedSerialization.java
+++ b/flink-core/src/main/java/org/apache/flink/core/io/SimpleVersionedSerialization.java
@@ -110,7 +110,7 @@
 		checkNotNull(datum, "datum");
 
 		final byte[] data = serializer.serialize(datum);
-		final byte[] versionAndData = new byte[data.length + 4];
+		final byte[] versionAndData = new byte[data.length + 8];
 
 		final int version = serializer.getVersion();
 		versionAndData[0] = (byte) (version >> 24);
@@ -118,8 +118,14 @@
 		versionAndData[2] = (byte) (version >>  8);
 		versionAndData[3] = (byte)  version;
 
+		final int length = data.length;
+		versionAndData[4] = (byte) (length >> 24);
+		versionAndData[5] = (byte) (length >> 16);
+		versionAndData[6] = (byte) (length >>  8);
+		versionAndData[7] = (byte)  length;
+
 		// move the data to the array
-		System.arraycopy(data, 0, versionAndData, 4, data.length);
+		System.arraycopy(data, 0, versionAndData, 8, data.length);
 
 		return versionAndData;
 	}
@@ -142,14 +148,25 @@
 		checkNotNull(bytes, "bytes");
 		checkArgument(bytes.length >= 4, "byte array below minimum length (4 bytes)");
 
-		final byte[] dataOnly = Arrays.copyOfRange(bytes, 4, bytes.length);
+		final byte[] dataOnly = Arrays.copyOfRange(bytes, 8, bytes.length);
 		final int version =
 				((bytes[0] & 0xff) << 24) |
-						((bytes[1] & 0xff) << 16) |
-						((bytes[2] & 0xff) <<  8) |
-						(bytes[3] & 0xff);
-
-		return serializer.deserialize(version, dataOnly);
+				((bytes[1] & 0xff) << 16) |
+				((bytes[2] & 0xff) <<  8) |
+				(bytes[3] & 0xff);
+
+		final int length =
+				((bytes[4] & 0xff) << 24) |
+				((bytes[5] & 0xff) << 16) |
+				((bytes[6] & 0xff) <<  8) |
+				(bytes[7] & 0xff);
+
+		if (length == dataOnly.length) {
+			return serializer.deserialize(version, dataOnly);
+		}
+		else {
+			throw new IOException("Corrupt data, conflicting lengths. Length fields: " + length + ", data: " + dataOnly.length);
+		}
 	}
 
 	// ------------------------------------------------------------------------
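
For clarity, the serialized layout after this change is an 8-byte big-endian header (4 bytes serializer version followed by 4 bytes payload length) and then the payload bytes. A worked sketch for version 1 and a 3-byte payload {0x0A, 0x0B, 0x0C}:

    // index:   0     1     2     3     4     5     6     7     8     9    10
    // value:  0x00  0x00  0x00  0x01  0x00  0x00  0x00  0x03  0x0A  0x0B  0x0C
    //         |----- version = 1 ----|----- length = 3 -----|---- payload ----|
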
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/TestOrderedStore.java b/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayDataInputView.java
similarity index 50%
rename from flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/TestOrderedStore.java
rename to flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayDataInputView.java
index 36a334122c4..33836f0c781 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/TestOrderedStore.java
+++ b/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayDataInputView.java
@@ -16,45 +16,41 @@
  * limitations under the License.
  */
 
-package org.apache.flink.runtime.state.heap;
-
-import org.apache.flink.util.CloseableIterator;
+package org.apache.flink.core.memory;
 
 import javax.annotation.Nonnull;
 
-import java.util.Comparator;
-import java.util.TreeSet;
-
 /**
- * Simple implementation of {@link org.apache.flink.runtime.state.heap.CachingInternalPriorityQueueSet.OrderedSetStore}
- * for tests.
+ * Reusable adapter to {@link DataInputView} that operates on given byte-arrays.
  */
-public class TestOrderedStore<T> implements CachingInternalPriorityQueueSet.OrderedSetStore<T> {
+public class ByteArrayDataInputView extends DataInputViewStreamWrapper {
+
+	@Nonnull
+	private final ByteArrayInputStreamWithPos inStreamWithPos;
 
-	private final TreeSet<T> treeSet;
+	public ByteArrayDataInputView() {
+		super(new ByteArrayInputStreamWithPos());
+		this.inStreamWithPos = (ByteArrayInputStreamWithPos) in;
+	}
 
-	public TestOrderedStore(Comparator<T> comparator) {
-		this.treeSet = new TreeSet<>(comparator);
+	public ByteArrayDataInputView(@Nonnull byte[] buffer) {
+		this(buffer, 0, buffer.length);
 	}
 
-	@Override
-	public void add(@Nonnull T element) {
-		treeSet.add(element);
+	public ByteArrayDataInputView(@Nonnull byte[] buffer, int offset, int length) {
+		this();
+		setData(buffer, offset, length);
 	}
 
-	@Override
-	public void remove(@Nonnull T element) {
-		treeSet.remove(element);
+	public int getPosition() {
+		return inStreamWithPos.getPosition();
 	}
 
-	@Override
-	public int size() {
-		return treeSet.size();
+	public void setPosition(int pos) {
+		inStreamWithPos.setPosition(pos);
 	}
 
-	@Nonnull
-	@Override
-	public CloseableIterator<T> orderedIterator() {
-		return CloseableIterator.adapterForIterator(treeSet.iterator());
+	public void setData(@Nonnull byte[] buffer, int offset, int length) {
+		inStreamWithPos.setBuffer(buffer, offset, length);
 	}
 }
diff --git a/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayDataOutputView.java b/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayDataOutputView.java
new file mode 100644
index 00000000000..a96f3d3fef1
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayDataOutputView.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.core.memory;
+
+import javax.annotation.Nonnull;
+
+/**
+ * Adapter to {@link DataOutputView} that operates on a byte-array and offers read/write access to the current position.
+ */
+public class ByteArrayDataOutputView extends DataOutputViewStreamWrapper {
+
+	@Nonnull
+	private final ByteArrayOutputStreamWithPos outputStreamWithPos;
+
+	public ByteArrayDataOutputView() {
+		this(64);
+	}
+
+	public ByteArrayDataOutputView(int initialSize) {
+		super(new ByteArrayOutputStreamWithPos(initialSize));
+		this.outputStreamWithPos = (ByteArrayOutputStreamWithPos) out;
+	}
+
+	public void reset() {
+		outputStreamWithPos.reset();
+	}
+
+	@Nonnull
+	public byte[] toByteArray() {
+		return outputStreamWithPos.toByteArray();
+	}
+
+	public int getPosition() {
+		return outputStreamWithPos.getPosition();
+	}
+
+	public void setPosition(int position) {
+		outputStreamWithPos.setPosition(position);
+	}
+
+	@Nonnull
+	public byte[] getInternalBufferReference() {
+		return outputStreamWithPos.getBuf();
+	}
+}
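
An illustrative round trip through the two new byte-array views (the written values are arbitrary):

    import org.apache.flink.core.memory.ByteArrayDataInputView;
    import org.apache.flink.core.memory.ByteArrayDataOutputView;

    import java.io.IOException;

    public class ByteArrayViewRoundTrip {
        public static void main(String[] args) throws IOException {
            ByteArrayDataOutputView out = new ByteArrayDataOutputView(32);
            out.writeLong(42L);           // written into the growing internal buffer
            out.writeUTF("checkpoint");

            ByteArrayDataInputView in = new ByteArrayDataInputView(out.toByteArray());
            System.out.println(in.readLong() + " " + in.readUTF()); // 42 checkpoint
        }
    }
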
diff --git a/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPos.java b/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPos.java
index 1447e9661c8..bc81593dea4 100644
--- a/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPos.java
+++ b/flink-core/src/main/java/org/apache/flink/core/memory/ByteArrayInputStreamWithPos.java
@@ -30,20 +30,23 @@
 @Internal
 public class ByteArrayInputStreamWithPos extends InputStream {
 
+	private static final byte[] EMPTY = new byte[0];
+
 	protected byte[] buffer;
 	protected int position;
 	protected int count;
 	protected int mark = 0;
 
+	public ByteArrayInputStreamWithPos() {
+		this(EMPTY);
+	}
+
 	public ByteArrayInputStreamWithPos(byte[] buffer) {
 		this(buffer, 0, buffer.length);
 	}
 
 	public ByteArrayInputStreamWithPos(byte[] buffer, int offset, int length) {
-		this.position = offset;
-		this.buffer = buffer;
-		this.mark = offset;
-		this.count = Math.min(buffer.length, offset + length);
+		setBuffer(buffer, offset, length);
 	}
 
 	@Override
@@ -122,4 +125,11 @@ public void setPosition(int pos) {
 		Preconditions.checkArgument(pos >= 0 && pos <= count, "Position out of bounds.");
 		this.position = pos;
 	}
+
+	public void setBuffer(byte[] buffer, int offset, int length) {
+		this.count = Math.min(buffer.length, offset + length);
+		setPosition(offset);
+		this.buffer = buffer;
+		this.mark = offset;
+	}
 }
diff --git a/flink-core/src/main/java/org/apache/flink/util/FileUtils.java b/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
index 23af2e8cf84..8f322626116 100644
--- a/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
+++ b/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
@@ -28,6 +28,8 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.WritableByteChannel;
 import java.nio.file.AccessDeniedException;
 import java.nio.file.Files;
 import java.nio.file.StandardOpenOption;
@@ -56,6 +58,14 @@
 
 	// ------------------------------------------------------------------------
 
+	public static void writeCompletely(WritableByteChannel channel, ByteBuffer src) throws IOException {
+		while (src.hasRemaining()) {
+			channel.write(src);
+		}
+	}
+
+	// ------------------------------------------------------------------------
+
 	/**
 	 * Constructs a random filename with the given prefix and
 	 * a random part generated from hex characters.
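
The loop in writeCompletely is needed because WritableByteChannel.write may write only part of the buffer. A minimal usage sketch (the target path is a placeholder):

    import org.apache.flink.util.FileUtils;

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    public class WriteCompletelyExample {
        public static void main(String[] args) throws IOException {
            ByteBuffer payload = ByteBuffer.wrap("hello flink".getBytes(StandardCharsets.UTF_8));
            try (FileChannel channel = FileChannel.open(
                    Paths.get("/tmp/example.bin"),
                    StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
                // keeps calling write(...) until the buffer has no remaining bytes
                FileUtils.writeCompletely(channel, payload);
            }
        }
    }
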
diff --git a/flink-core/src/main/java/org/apache/flink/util/StateMigrationException.java b/flink-core/src/main/java/org/apache/flink/util/StateMigrationException.java
index 00e0e73dc5c..12f3ee43583 100644
--- a/flink-core/src/main/java/org/apache/flink/util/StateMigrationException.java
+++ b/flink-core/src/main/java/org/apache/flink/util/StateMigrationException.java
@@ -24,6 +24,8 @@
 public class StateMigrationException extends FlinkException {
 	private static final long serialVersionUID = 8268516412747670839L;
 
+	public static final String MIGRATION_NOT_SUPPORTED_MSG = "State migration is currently not supported.";
+
 	public StateMigrationException(String message) {
 		super(message);
 	}
@@ -35,4 +37,8 @@ public StateMigrationException(Throwable cause) {
 	public StateMigrationException(String message, Throwable cause) {
 		super(message, cause);
 	}
+
+	public static StateMigrationException notSupported() {
+		return new StateMigrationException(MIGRATION_NOT_SUPPORTED_MSG);
+	}
 }
diff --git a/flink-core/src/main/java/org/apache/flink/util/StringUtils.java b/flink-core/src/main/java/org/apache/flink/util/StringUtils.java
index 208a30114d5..a5fff4cb2f0 100644
--- a/flink-core/src/main/java/org/apache/flink/util/StringUtils.java
+++ b/flink-core/src/main/java/org/apache/flink/util/StringUtils.java
@@ -30,6 +30,7 @@
 import java.util.Arrays;
 import java.util.Random;
 
+import static org.apache.flink.util.Preconditions.checkArgument;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
@@ -246,6 +247,38 @@ public static String getRandomString(Random rnd, int minLength, int maxLength, c
 		return new String(data);
 	}
 
+	/**
+	 * Creates a random alphanumeric string of given length.
+	 *
+	 * @param rnd The random number generator to use.
+	 * @param length The number of alphanumeric characters to generate.
+	 */
+	public static String generateRandomAlphanumericString(Random rnd, int length) {
+		checkNotNull(rnd);
+		checkArgument(length >= 0);
+
+		StringBuilder buffer = new StringBuilder(length);
+		for (int i = 0; i < length; i++) {
+			buffer.append(nextAlphanumericChar(rnd));
+		}
+		return buffer.toString();
+	}
+
+	private static char nextAlphanumericChar(Random rnd) {
+		int which = rnd.nextInt(62);
+		char c;
+		if (which < 10) {
+			c = (char) ('0' + which);
+		}
+		else if (which < 36) {
+			c = (char) ('A' - 10 + which);
+		}
+		else {
+			c = (char) ('a' - 36 + which);
+		}
+		return c;
+	}
+
 	/**
 	 * Writes a String to the given output.
 	 * The written string can be read with {@link #readString(DataInputView)}.
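
The generateRandomAlphanumericString method added above draws each character uniformly from the 62 characters 0-9, A-Z and a-z. A minimal usage sketch; the seed and prefix are arbitrary:

    import org.apache.flink.util.StringUtils;

    import java.util.Random;

    public class RandomNameExample {
        public static void main(String[] args) {
            Random rnd = new Random(42L);
            // 16 alphanumeric characters, e.g. usable as a unique-ish suffix
            String suffix = StringUtils.generateRandomAlphanumericString(rnd, 16);
            System.out.println("tmp-" + suffix);
        }
    }
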
diff --git a/flink-core/src/main/java/org/apache/flink/util/function/ConsumerWithException.java b/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureConsumerWithException.java
similarity index 76%
rename from flink-core/src/main/java/org/apache/flink/util/function/ConsumerWithException.java
rename to flink-core/src/main/java/org/apache/flink/util/concurrent/FutureConsumerWithException.java
index 09507d4e9f2..c49d7dc08d4 100644
--- a/flink-core/src/main/java/org/apache/flink/util/function/ConsumerWithException.java
+++ b/flink-core/src/main/java/org/apache/flink/util/concurrent/FutureConsumerWithException.java
@@ -16,19 +16,19 @@
  * limitations under the License.
  */
 
-package org.apache.flink.util.function;
-
-import org.apache.flink.util.ExceptionUtils;
+package org.apache.flink.util.concurrent;
 
+import java.util.concurrent.CompletionException;
 import java.util.function.Consumer;
 
 /**
- * A checked extension of the {@link Consumer} interface.
+ * A checked extension of the {@link Consumer} interface which rethrows
+ * exceptions wrapped in a {@link CompletionException}.
  *
  * @param <T> type of the first argument
  * @param <E> type of the thrown exception
  */
-public interface ConsumerWithException<T, E extends Throwable> extends Consumer<T> {
+public interface FutureConsumerWithException<T, E extends Throwable> extends Consumer<T> {
 
 	void acceptWithException(T value) throws E;
 
@@ -37,7 +37,7 @@ default void accept(T value) {
 		try {
 			acceptWithException(value);
 		} catch (Throwable t) {
-			ExceptionUtils.rethrow(t);
+			throw new CompletionException(t);
 		}
 	}
 }
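
The renamed FutureConsumerWithException wraps checked exceptions in a CompletionException, the exception type CompletableFuture pipelines expect, so failures surface through the future instead of being rethrown raw. A small sketch of how this might look at a call site; the validation logic is made up:

    import org.apache.flink.util.concurrent.FutureConsumerWithException;

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;

    public class FutureConsumerExample {
        public static void main(String[] args) {
            FutureConsumerWithException<String, IOException> validate = value -> {
                if (value.isEmpty()) {
                    throw new IOException("empty value");
                }
            };

            CompletableFuture.completedFuture("")
                .thenAccept(validate)   // the IOException is wrapped in a CompletionException
                .exceptionally(t -> {
                    System.err.println("failed: " + t.getCause());
                    return null;
                });
        }
    }
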
diff --git a/flink-core/src/main/java/org/apache/flink/util/function/BiConsumerWithException.java b/flink-core/src/main/java/org/apache/flink/util/function/BiConsumerWithException.java
index 5864c8a985d..6fc5b76f246 100644
--- a/flink-core/src/main/java/org/apache/flink/util/function/BiConsumerWithException.java
+++ b/flink-core/src/main/java/org/apache/flink/util/function/BiConsumerWithException.java
@@ -30,7 +30,7 @@
  * @param <E> type of the thrown exception
  */
 @FunctionalInterface
-public interface BiConsumerWithException<T, U, E extends Throwable> extends BiConsumer<T, U> {
+public interface BiConsumerWithException<T, U, E extends Throwable> {
 
 	/**
 	 * Performs this operation on the given arguments.
@@ -39,14 +39,23 @@
 	 * @param u the second input argument
 	 * @throws E in case of an error
 	 */
-	void acceptWithException(T t, U u) throws E;
+	void accept(T t, U u) throws E;
 
-	@Override
-	default void accept(T t, U u) {
-		try {
-			acceptWithException(t, u);
-		} catch (Throwable e) {
-			ExceptionUtils.rethrow(e);
-		}
+	/**
+	 * Convert a {@link BiConsumerWithException} into a {@link BiConsumer}.
+	 *
+	 * @param biConsumerWithException BiConsumer with exception to convert into a {@link BiConsumer}.
+	 * @param <A> first input type
+	 * @param <B> second input type
+	 * @return {@link BiConsumer} which rethrows all checked exceptions as unchecked.
+	 */
+	static <A, B> BiConsumer<A, B> unchecked(BiConsumerWithException<A, B, ?> biConsumerWithException) {
+		return (A a, B b) -> {
+			try {
+				biConsumerWithException.accept(a, b);
+			} catch (Throwable t) {
+				ExceptionUtils.rethrow(t);
+			}
+		};
 	}
 }
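
BiConsumerWithException no longer extends BiConsumer; instead the static unchecked() adapter converts it wherever a plain BiConsumer is required. A small usage sketch; the map contents and check are placeholders:

    import org.apache.flink.util.function.BiConsumerWithException;

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class BiConsumerExample {
        public static void main(String[] args) {
            BiConsumerWithException<String, Integer, IOException> check = (key, value) -> {
                if (value < 0) {
                    throw new IOException("negative count for " + key);
                }
            };

            Map<String, Integer> counts = new HashMap<>();
            counts.put("records", 42);

            // Map.forEach expects a plain java.util.function.BiConsumer;
            // unchecked() rethrows any checked exception as an unchecked one.
            counts.forEach(BiConsumerWithException.unchecked(check));
        }
    }
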
diff --git a/flink-core/src/main/java/org/apache/flink/util/function/BiFunctionWithException.java b/flink-core/src/main/java/org/apache/flink/util/function/BiFunctionWithException.java
index 967c737e584..ccba8a7e774 100644
--- a/flink-core/src/main/java/org/apache/flink/util/function/BiFunctionWithException.java
+++ b/flink-core/src/main/java/org/apache/flink/util/function/BiFunctionWithException.java
@@ -31,7 +31,7 @@
  * @param <E> type of the exception which can be thrown
  */
 @FunctionalInterface
-public interface BiFunctionWithException<T, U, R, E extends Throwable> extends BiFunction<T, U, R> {
+public interface BiFunctionWithException<T, U, R, E extends Throwable> {
 
 	/**
 	 * Apply the given values t and u to obtain the resulting value. The operation can
@@ -42,16 +42,25 @@
 	 * @return result value
 	 * @throws E if the operation fails
 	 */
-	R applyWithException(T t, U u) throws E;
+	R apply(T t, U u) throws E;
 
-	default R apply(T t, U u) {
-		try {
-			return applyWithException(t, u);
-		} catch (Throwable e) {
-			ExceptionUtils.rethrow(e);
-			// we have to return a value to please the compiler
-			// but we will never reach the code here
-			return null;
-		}
+	/**
+	 * Convert a {@link BiFunctionWithException} into a {@link BiFunction}.
+	 *
+	 * @param biFunctionWithException function with exception to convert into a function
+	 * @param <A> first input type
+	 * @param <B> second input type
+	 * @return {@link BiFunction} which throws all checked exceptions as unchecked exceptions.
+	 */
+	static <A, B, C> BiFunction<A, B, C> unchecked(BiFunctionWithException<A, B, C, ?> biFunctionWithException) {
+		return (A a, B b) -> {
+			try {
+				return biFunctionWithException.apply(a, b);
+			} catch (Throwable t) {
+				ExceptionUtils.rethrow(t);
+				// we need this to appease the compiler :-(
+				return null;
+			}
+		};
 	}
 }
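
The same pattern applies to BiFunctionWithException: unchecked() adapts it to a plain BiFunction and rethrows checked exceptions unchecked. A hedged sketch with made-up parsing logic:

    import org.apache.flink.util.function.BiFunctionWithException;

    import java.io.IOException;
    import java.util.function.BiFunction;

    public class BiFunctionExample {
        public static void main(String[] args) throws Exception {
            BiFunctionWithException<String, String, Integer, IOException> parseRatio =
                (num, den) -> {
                    int d = Integer.parseInt(den);
                    if (d == 0) {
                        throw new IOException("zero denominator");
                    }
                    return Integer.parseInt(num) / d;
                };

            // Adapt to a plain BiFunction for APIs that cannot handle checked exceptions.
            BiFunction<String, String, Integer> ratio = BiFunctionWithException.unchecked(parseRatio);
            System.out.println(ratio.apply("10", "2"));   // prints 5
        }
    }
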
diff --git a/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java b/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
new file mode 100644
index 00000000000..b777308043f
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/util/function/FunctionUtils.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.util.function;
+
+import org.apache.flink.util.ExceptionUtils;
+
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+/**
+ * Utility class for Flink's functions.
+ */
+public class FunctionUtils {
+
+	private FunctionUtils() {
+		throw new UnsupportedOperationException("This class should never be instantiated.");
+	}
+
+	private static final Function<Object, Void> NULL_FN = ignored -> null;
+
+	private static final Consumer<Object> IGNORE_FN = ignored -> {};
+
+	/**
+	 * Function which returns {@code null} (type: Void).
+	 *
+	 * @param <T> input type
+	 * @return Function which returns {@code null}.
+	 */
+	@SuppressWarnings("unchecked")
+	public static <T> Function<T, Void> nullFn() {
+		return (Function<T, Void>) NULL_FN;
+	}
+
+	/**
+	 * Consumer which ignores the input.
+	 *
+	 * @param <T> type of the input
+	 * @return Ignoring {@link Consumer}
+	 */
+	@SuppressWarnings("unchecked")
+	public static <T> Consumer<T> ignoreFn() {
+		return (Consumer<T>) IGNORE_FN;
+	}
+
+	/**
+	 * Convert a {@link FunctionWithException} into a {@link Function}.
+	 *
+	 * @param functionWithException function with exception to convert into a function
+	 * @param <A> input type
+	 * @param <B> output type
+	 * @return {@link Function} which throws all checked exceptions as unchecked exceptions.
+	 */
+	public static <A, B> Function<A, B> uncheckedFunction(FunctionWithException<A, B, ?> functionWithException) {
+		return (A value) -> {
+			try {
+				return functionWithException.apply(value);
+			} catch (Throwable t) {
+				ExceptionUtils.rethrow(t);
+				// we need this to appease the compiler :-(
+				return null;
+			}
+		};
+	}
+
+	/**
+	 * Converts a {@link ThrowingConsumer} into a {@link Consumer} which throws checked exceptions
+	 * as unchecked.
+	 *
+	 * @param throwingConsumer to convert into a {@link Consumer}
+	 * @param <A> input type
+	 * @return {@link Consumer} which throws all checked exceptions as unchecked
+	 */
+	public static <A> Consumer<A> uncheckedConsumer(ThrowingConsumer<A, ?> throwingConsumer) {
+		return (A value) -> {
+			try {
+				throwingConsumer.accept(value);
+			} catch (Throwable t) {
+				ExceptionUtils.rethrow(t);
+			}
+		};
+	}
+}
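
The new FunctionUtils class collects adapters for Flink's checked functional interfaces plus two trivial helpers (nullFn, ignoreFn). A sketch of how uncheckedFunction() and ignoreFn() might be used with the JDK stream API; the file names are placeholders:

    import org.apache.flink.util.function.FunctionUtils;

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.List;
    import java.util.function.Function;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class FunctionUtilsExample {
        public static void main(String[] args) {
            // Files::readAllBytes declares IOException, which java.util.function.Function
            // cannot; uncheckedFunction() adapts it and rethrows the exception unchecked.
            Function<Path, byte[]> readBytes = FunctionUtils.uncheckedFunction(Files::readAllBytes);

            List<byte[]> contents = Stream.of("a.txt", "b.txt")
                .map(Paths::get)
                .map(readBytes)
                .collect(Collectors.toList());

            // ignoreFn() provides a no-op Consumer where a callback is required but unused.
            contents.forEach(FunctionUtils.ignoreFn());
        }
    }
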
diff --git a/flink-core/src/main/java/org/apache/flink/util/function/ThrowingRunnable.java b/flink-core/src/main/java/org/apache/flink/util/function/ThrowingRunnable.java
index 4fef4207838..0dd4047a1e5 100644
--- a/flink-core/src/main/java/org/apache/flink/util/function/ThrowingRunnable.java
+++ b/flink-core/src/main/java/org/apache/flink/util/function/ThrowingRunnable.java
@@ -19,6 +19,7 @@
 package org.apache.flink.util.function;
 
 import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.util.ExceptionUtils;
 
 /**
  * Similar to a {@link Runnable}, this interface is used to capture a block of code
@@ -35,4 +36,21 @@
 	 * @throws E Exceptions may be thrown.
 	 */
 	void run() throws E;
+
+	/**
+	 * Converts a {@link ThrowingRunnable} into a {@link Runnable} which throws all checked exceptions
+	 * as unchecked.
+	 *
+	 * @param throwingRunnable to convert into a {@link Runnable}
+	 * @return {@link Runnable} which throws all checked exceptions as unchecked.
+	 */
+	static Runnable unchecked(ThrowingRunnable<?> throwingRunnable) {
+		return () -> {
+			try {
+				throwingRunnable.run();
+			} catch (Throwable t) {
+				ExceptionUtils.rethrow(t);
+			}
+		};
+	}
 }
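
ThrowingRunnable.unchecked() plays the same role for Runnable-based APIs such as executors. A minimal sketch; the failing task is made up:

    import org.apache.flink.util.function.ThrowingRunnable;

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ThrowingRunnableExample {
        public static void main(String[] args) {
            ThrowingRunnable<IOException> task = () -> {
                throw new IOException("simulated failure");
            };

            ExecutorService executor = Executors.newSingleThreadExecutor();
            // Executor.execute expects a plain Runnable; unchecked() rethrows
            // the checked IOException as an unchecked exception inside the task.
            executor.execute(ThrowingRunnable.unchecked(task));
            executor.shutdown();
        }
    }
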
diff --git a/flink-core/src/main/java/org/apache/flink/util/function/TriConsumer.java b/flink-core/src/main/java/org/apache/flink/util/function/TriConsumer.java
new file mode 100644
index 00000000000..181a9bd624f
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/util/function/TriConsumer.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.util.function;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/**
+ * Operation which is performed on three given arguments.
+ *
+ * @param <S> type of the first argument
+ * @param <T> type of the second argument
+ * @param <U> type of the third argument
+ */
+@PublicEvolving
+@FunctionalInterface
+public interface TriConsumer<S, T, U> {
+
+	/**
+	 * Performs this operation on the given arguments.
+	 *
+	 * @param s first argument
+	 * @param t second argument
+	 * @param u third argument
+	 */
+	void accept(S s, T t, U u);
+}
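
TriConsumer is a three-argument counterpart to java.util.function.BiConsumer. A trivial usage sketch; the arguments are made up:

    import org.apache.flink.util.function.TriConsumer;

    public class TriConsumerExample {
        public static void main(String[] args) {
            TriConsumer<String, Integer, Long> report =
                (name, count, timestampMillis) ->
                    System.out.println(name + ": " + count + " records at " + timestampMillis);

            report.accept("source-1", 42, System.currentTimeMillis());
        }
    }
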
diff --git a/flink-core/src/main/java/org/apache/flink/util/function/TriFunction.java b/flink-core/src/main/java/org/apache/flink/util/function/TriFunction.java
new file mode 100644
index 00000000000..7e1e967a86d
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/util/function/TriFunction.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.util.function;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/**
+ * Function which takes three arguments.
+ *
+ * @param <S> type of the first argument
+ * @param <T> type of the second argument
+ * @param <U> type of the third argument
+ * @param <R> type of the return value
+ */
+@PublicEvolving
+@FunctionalInterface
+public interface TriFunction<S, T, U, R> {
+
+	/**
+	 * Applies this function to the given arguments.
+	 *
+	 * @param s the first function argument
+	 * @param t the second function argument
+	 * @param u the third function argument
+	 * @return the function result
+	 */
+	R apply(S s, T t, U u);
+}
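
TriFunction is the three-argument counterpart to BiFunction. A trivial usage sketch; the clamp logic is just an example:

    import org.apache.flink.util.function.TriFunction;

    public class TriFunctionExample {
        public static void main(String[] args) {
            TriFunction<Integer, Integer, Integer, Integer> clamp =
                (value, min, max) -> Math.max(min, Math.min(max, value));

            System.out.println(clamp.apply(17, 0, 10));   // prints 10
        }
    }
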
diff --git a/flink-core/src/test/java/org/apache/flink/api/common/typeutils/SerializerTestBase.java b/flink-core/src/test/java/org/apache/flink/api/common/typeutils/SerializerTestBase.java
index 57015c78be0..1997866fb3c 100644
--- a/flink-core/src/test/java/org/apache/flink/api/common/typeutils/SerializerTestBase.java
+++ b/flink-core/src/test/java/org/apache/flink/api/common/typeutils/SerializerTestBase.java
@@ -32,6 +32,7 @@
 import java.io.IOException;
 import java.util.Arrays;
 
+import org.apache.flink.api.java.typeutils.runtime.NullableSerializer;
 import org.apache.flink.core.memory.DataInputViewStreamWrapper;
 import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
 import org.apache.flink.util.InstantiationUtil;
@@ -53,23 +54,23 @@
  * internal state would be corrupt, which becomes evident when toString is called.
  */
 public abstract class SerializerTestBase<T> extends TestLogger {
-	
+
 	protected abstract TypeSerializer<T> createSerializer();
 
 	/**
 	 * Gets the expected length for the serializer's {@link TypeSerializer#getLength()} method.
-	 * 
+	 *
 	 * <p>The expected length should be positive, for fix-length data types, or {@code -1} for
 	 * variable-length types.
 	 */
 	protected abstract int getLength();
-	
+
 	protected abstract Class<T> getTypeClass();
-	
+
 	protected abstract T[] getTestData();
 
 	// --------------------------------------------------------------------------------------------
-	
+
 	@Test
 	public void testInstantiate() {
 		try {
@@ -80,13 +81,13 @@ public void testInstantiate() {
 			}
 			T instance = serializer.createInstance();
 			assertNotNull("The created instance must not be null.", instance);
-			
+
 			Class<T> type = getTypeClass();
 			assertNotNull("The test is corrupt: type class is null.", type);
 
 			if (!type.isAssignableFrom(instance.getClass())) {
 				fail("Type of the instantiated object is wrong. " +
-						"Expected Type: " + type + " present type " + instance.getClass());
+					"Expected Type: " + type + " present type " + instance.getClass());
 			}
 		}
 		catch (Exception e) {
@@ -127,7 +128,7 @@ public void testSnapshotConfigurationAndReconfigure() throws Exception {
 		strategy = getSerializer().ensureCompatibility(new TestIncompatibleSerializerConfigSnapshot());
 		assertTrue(strategy.isRequiresMigration());
 	}
-	
+
 	@Test
 	public void testGetLength() {
 		final int len = getLength();
@@ -146,16 +147,16 @@ public void testGetLength() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testCopy() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			for (T datum : testData) {
 				T copy = serializer.copy(datum);
-				copy.toString();
+				checkToString(copy);
 				deepEquals("Copied element is not equal to the original element.", datum, copy);
 			}
 		}
@@ -165,16 +166,16 @@ public void testCopy() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testCopyIntoNewElements() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			for (T datum : testData) {
 				T copy = serializer.copy(datum, serializer.createInstance());
-				copy.toString();
+				checkToString(copy);
 				deepEquals("Copied element is not equal to the original element.", datum, copy);
 			}
 		}
@@ -184,18 +185,18 @@ public void testCopyIntoNewElements() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testCopyIntoReusedElements() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			T target = serializer.createInstance();
-			
+
 			for (T datum : testData) {
 				T copy = serializer.copy(datum, target);
-				copy.toString();
+				checkToString(copy);
 				deepEquals("Copied element is not equal to the original element.", datum, copy);
 				target = copy;
 			}
@@ -206,25 +207,25 @@ public void testCopyIntoReusedElements() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testSerializeIndividually() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			for (T value : testData) {
 				TestOutputView out = new TestOutputView();
 				serializer.serialize(value, out);
 				TestInputView in = out.getInputView();
-				
+
 				assertTrue("No data available during deserialization.", in.available() > 0);
-				
+
 				T deserialized = serializer.deserialize(serializer.createInstance(), in);
- 				deserialized.toString();
+				checkToString(deserialized);
 
 				deepEquals("Deserialized value if wrong.", value, deserialized);
-				
+
 				assertTrue("Trailing data available after deserialization.", in.available() == 0);
 			}
 		}
@@ -241,23 +242,23 @@ public void testSerializeIndividuallyReusingValues() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			T reuseValue = serializer.createInstance();
-			
+
 			for (T value : testData) {
 				TestOutputView out = new TestOutputView();
 				serializer.serialize(value, out);
 				TestInputView in = out.getInputView();
-				
+
 				assertTrue("No data available during deserialization.", in.available() > 0);
-				
+
 				T deserialized = serializer.deserialize(reuseValue, in);
-				deserialized.toString();
+				checkToString(deserialized);
 
 				deepEquals("Deserialized value if wrong.", value, deserialized);
-				
+
 				assertTrue("Trailing data available after deserialization.", in.available() == 0);
-				
+
 				reuseValue = deserialized;
 			}
 		}
@@ -267,29 +268,29 @@ public void testSerializeIndividuallyReusingValues() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testSerializeAsSequenceNoReuse() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			TestOutputView out = new TestOutputView();
 			for (T value : testData) {
 				serializer.serialize(value, out);
 			}
-			
+
 			TestInputView in = out.getInputView();
-			
+
 			int num = 0;
 			while (in.available() > 0) {
 				T deserialized = serializer.deserialize(in);
-				deserialized.toString();
+				checkToString(deserialized);
 
 				deepEquals("Deserialized value if wrong.", testData[num], deserialized);
 				num++;
 			}
-			
+
 			assertEquals("Wrong number of elements deserialized.", testData.length, num);
 		}
 		catch (Exception e) {
@@ -298,31 +299,31 @@ public void testSerializeAsSequenceNoReuse() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testSerializeAsSequenceReusingValues() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			TestOutputView out = new TestOutputView();
 			for (T value : testData) {
 				serializer.serialize(value, out);
 			}
-			
+
 			TestInputView in = out.getInputView();
 			T reuseValue = serializer.createInstance();
-			
+
 			int num = 0;
 			while (in.available() > 0) {
 				T deserialized = serializer.deserialize(reuseValue, in);
-				deserialized.toString();
+				checkToString(deserialized);
 
 				deepEquals("Deserialized value if wrong.", testData[num], deserialized);
 				reuseValue = deserialized;
 				num++;
 			}
-			
+
 			assertEquals("Wrong number of elements deserialized.", testData.length, num);
 		}
 		catch (Exception e) {
@@ -331,30 +332,30 @@ public void testSerializeAsSequenceReusingValues() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testSerializedCopyIndividually() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			for (T value : testData) {
 				TestOutputView out = new TestOutputView();
 				serializer.serialize(value, out);
-				
+
 				TestInputView source = out.getInputView();
 				TestOutputView target = new TestOutputView();
 				serializer.copy(source, target);
-				
+
 				TestInputView toVerify = target.getInputView();
-				
+
 				assertTrue("No data available copying.", toVerify.available() > 0);
-				
+
 				T deserialized = serializer.deserialize(serializer.createInstance(), toVerify);
-				deserialized.toString();
+				checkToString(deserialized);
 
 				deepEquals("Deserialized value if wrong.", value, deserialized);
-				
+
 				assertTrue("Trailing data available after deserialization.", toVerify.available() == 0);
 			}
 		}
@@ -364,36 +365,36 @@ public void testSerializedCopyIndividually() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
-	
+
+
 	@Test
 	public void testSerializedCopyAsSequence() {
 		try {
 			TypeSerializer<T> serializer = getSerializer();
 			T[] testData = getData();
-			
+
 			TestOutputView out = new TestOutputView();
 			for (T value : testData) {
 				serializer.serialize(value, out);
 			}
-			
+
 			TestInputView source = out.getInputView();
 			TestOutputView target = new TestOutputView();
 			for (int i = 0; i < testData.length; i++) {
 				serializer.copy(source, target);
 			}
-			
+
 			TestInputView toVerify = target.getInputView();
 			int num = 0;
-			
+
 			while (toVerify.available() > 0) {
 				T deserialized = serializer.deserialize(serializer.createInstance(), toVerify);
-				deserialized.toString();
+				checkToString(deserialized);
 
 				deepEquals("Deserialized value if wrong.", testData[num], deserialized);
 				num++;
 			}
-			
+
 			assertEquals("Wrong number of elements copied.", testData.length, num);
 		}
 		catch (Exception e) {
@@ -402,7 +403,7 @@ public void testSerializedCopyAsSequence() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
 	@Test
 	public void testSerializabilityAndEquals() {
 		try {
@@ -414,7 +415,7 @@ public void testSerializabilityAndEquals() {
 				fail("The serializer is not serializable: " + e);
 				return;
 			}
-			
+
 			assertEquals("The copy of the serializer is not equal to the original one.", ser1, ser2);
 		}
 		catch (Exception e) {
@@ -423,10 +424,26 @@ public void testSerializabilityAndEquals() {
 			fail("Exception in test: " + e.getMessage());
 		}
 	}
-	
+
+	@Test
+	public void testNullability() {
+		TypeSerializer<T> serializer = getSerializer();
+		try {
+			NullableSerializer.checkIfNullSupported(serializer);
+		} catch (Throwable t) {
+			System.err.println(t.getMessage());
+			t.printStackTrace();
+			fail("Unexpected failure of null value handling: " + t.getMessage());
+		}
+	}
+
 	// --------------------------------------------------------------------------------------------
-	
+
 	protected void deepEquals(String message, T should, T is) {
+		Assert.assertTrue((should == null && is == null) || (should != null && is != null));
+		if (should == null) {
+			return;
+		}
 		if (should.getClass().isArray()) {
 			if (should instanceof boolean[]) {
 				Assert.assertTrue(message, Arrays.equals((boolean[]) should, (boolean[]) is));
@@ -463,9 +480,9 @@ else if (should instanceof Throwable) {
 			assertEquals(message,  should, is);
 		}
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
-	
+
 	protected TypeSerializer<T> getSerializer() {
 		TypeSerializer<T> serializer = createSerializer();
 		if (serializer == null) {
@@ -473,7 +490,7 @@ else if (should instanceof Throwable) {
 		}
 		return serializer;
 	}
-	
+
 	private T[] getData() {
 		T[] data = getTestData();
 		if (data == null) {
@@ -481,15 +498,15 @@ else if (should instanceof Throwable) {
 		}
 		return data;
 	}
-	
+
 	// --------------------------------------------------------------------------------------------
-	
+
 	private static final class TestOutputView extends DataOutputStream implements DataOutputView {
-		
+
 		public TestOutputView() {
 			super(new ByteArrayOutputStream(4096));
 		}
-		
+
 		public TestInputView getInputView() {
 			ByteArrayOutputStream baos = (ByteArrayOutputStream) out;
 			return new TestInputView(baos.toByteArray());
@@ -509,8 +526,8 @@ public void write(DataInputView source, int numBytes) throws IOException {
 			write(buffer);
 		}
 	}
-	
-	
+
+
 	private static final class TestInputView extends DataInputStream implements DataInputView {
 
 		public TestInputView(byte[] data) {
@@ -542,4 +559,10 @@ public int hashCode() {
 			return getClass().hashCode();
 		}
 	}
+
+	private static <T> void checkToString(T value) {
+		if (value != null) {
+			value.toString();
+		}
+	}
 }
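
Concrete serializer test suites plug into this base class by implementing four abstract hooks; the null-handling and toString checks added above then run automatically for every subclass. A hypothetical subclass, using Flink's existing StringSerializer purely as an example serializer under test:

    import org.apache.flink.api.common.typeutils.SerializerTestBase;
    import org.apache.flink.api.common.typeutils.TypeSerializer;
    import org.apache.flink.api.common.typeutils.base.StringSerializer;

    public class StringSerializerExampleTest extends SerializerTestBase<String> {

        @Override
        protected TypeSerializer<String> createSerializer() {
            return StringSerializer.INSTANCE;
        }

        @Override
        protected int getLength() {
            return -1;   // variable-length type
        }

        @Override
        protected Class<String> getTypeClass() {
            return String.class;
        }

        @Override
        protected String[] getTestData() {
            return new String[] {"", "flink", "serializer test"};
        }
    }
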
diff --git a/flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeInformationTestBase.java b/flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeInformationTestBase.java
index bd35070a0d5..164fc9518e6 100644
--- a/flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeInformationTestBase.java
+++ b/flink-core/src/test/java/org/apache/flink/api/common/typeutils/TypeInformationTestBase.java
@@ -95,6 +95,16 @@ public void testSerialization() {
 		}
 	}
 
+	@Test
+	public void testGetTotalFields() {
+		final T[] testData = getTestData();
+		for (T typeInfo : testData) {
+			assertTrue(
+				"Number of total fields must be at least 1",
+				typeInfo.getTotalFields() > 0);
+		}
+	}
+
 	private static class UnrelatedTypeInfo extends TypeInformation<Object> {
 
 		@Override
diff --git a/flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerUpgradeTest.java b/flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerUpgradeTest.java
index 1f67acbab7d..2bcae452287 100644
--- a/flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerUpgradeTest.java
+++ b/flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/EnumSerializerUpgradeTest.java
@@ -23,21 +23,17 @@
 import org.apache.flink.api.common.typeutils.TypeSerializerSerializationUtil;
 import org.apache.flink.core.memory.DataInputViewStreamWrapper;
 import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
+import org.apache.flink.testutils.ClassLoaderUtils;
 import org.apache.flink.util.TestLogger;
+
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
-import javax.tools.JavaCompiler;
-import javax.tools.ToolProvider;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
-import java.net.URL;
-import java.net.URLClassLoader;
 
 public class EnumSerializerUpgradeTest extends TestLogger {
 
@@ -87,7 +83,7 @@ public void checkDifferentFieldOrder() throws Exception {
 	private static CompatibilityResult checkCompatibility(String enumSourceA, String enumSourceB)
 		throws IOException, ClassNotFoundException {
 
-		ClassLoader classLoader = compileAndLoadEnum(
+		ClassLoader classLoader = ClassLoaderUtils.compileAndLoadJava(
 			temporaryFolder.newFolder(), ENUM_NAME + ".java", enumSourceA);
 
 		EnumSerializer enumSerializer = new EnumSerializer(classLoader.loadClass(ENUM_NAME));
@@ -102,7 +98,7 @@ private static CompatibilityResult checkCompatibility(String enumSourceA, String
 			snapshotBytes = outBuffer.toByteArray();
 		}
 
-		ClassLoader classLoader2 = compileAndLoadEnum(
+		ClassLoader classLoader2 = ClassLoaderUtils.compileAndLoadJava(
 			temporaryFolder.newFolder(), ENUM_NAME + ".java", enumSourceB);
 
 		TypeSerializerConfigSnapshot restoredSnapshot;
@@ -116,29 +112,4 @@ private static CompatibilityResult checkCompatibility(String enumSourceA, String
 		EnumSerializer enumSerializer2 = new EnumSerializer(classLoader2.loadClass(ENUM_NAME));
 		return enumSerializer2.ensureCompatibility(restoredSnapshot);
 	}
-
-	private static ClassLoader compileAndLoadEnum(File root, String filename, String source) throws IOException {
-		File file = writeSourceFile(root, filename, source);
-
-		compileClass(file);
-
-		return new URLClassLoader(
-			new URL[]{root.toURI().toURL()},
-			Thread.currentThread().getContextClassLoader());
-	}
-
-	private static File writeSourceFile(File root, String filename, String source) throws IOException {
-		File file = new File(root, filename);
-		FileWriter fileWriter = new FileWriter(file);
-
-		fileWriter.write(source);
-		fileWriter.close();
-
-		return file;
-	}
-
-	private static int compileClass(File sourceFile) {
-		JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
-		return compiler.run(null, null, null, "-proc:none", sourceFile.getPath());
-	}
 }
diff --git a/flink-core/src/test/java/org/apache/flink/api/java/tuple/TupleGenerator.java b/flink-core/src/test/java/org/apache/flink/api/java/tuple/TupleGenerator.java
index bd5598ad983..d6849676466 100644
--- a/flink-core/src/test/java/org/apache/flink/api/java/tuple/TupleGenerator.java
+++ b/flink-core/src/test/java/org/apache/flink/api/java/tuple/TupleGenerator.java
@@ -133,6 +133,17 @@ private static void insertCodeIntoFile(String code, File file) throws IOExceptio
 	private static void modifyTupleType(File root) throws IOException {
 		// generate code
 		StringBuilder sb = new StringBuilder();
+		sb.append("\tpublic static Tuple newInstance(int arity) {\n");
+		sb.append("\t\tswitch (arity) {\n");
+		// special case for Tuple0:
+		sb.append("\t\t\tcase 0: return Tuple0.INSTANCE;\n");
+		for (int i = FIRST; i <= LAST; i++) {
+			sb.append("\t\t\tcase ").append(i).append(": return new Tuple").append(i).append("();\n");
+		}
+		sb.append("\t\t\tdefault: throw new IllegalArgumentException(\"The tuple arity must be in [0, \" + MAX_ARITY + \"].\");\n");
+		sb.append("\t\t}\n");
+		sb.append("\t}\n\n");
+
 		sb.append("\tprivate static final Class<?>[] CLASSES = new Class<?>[] {\n\t\tTuple0.class");
 		for (int i = FIRST; i <= LAST; i++) {
 			sb.append(", Tuple").append(i).append(".class");
diff --git a/flink-java8/src/test/java/org/apache/flink/api/java/type/lambdas/LambdaExtractionTest.java b/flink-core/src/test/java/org/apache/flink/api/java/typeutils/LambdaExtractionTest.java
similarity index 54%
rename from flink-java8/src/test/java/org/apache/flink/api/java/type/lambdas/LambdaExtractionTest.java
rename to flink-core/src/test/java/org/apache/flink/api/java/typeutils/LambdaExtractionTest.java
index de1f395ac1b..1d5cf22a5c1 100644
--- a/flink-java8/src/test/java/org/apache/flink/api/java/type/lambdas/LambdaExtractionTest.java
+++ b/flink-core/src/test/java/org/apache/flink/api/java/typeutils/LambdaExtractionTest.java
@@ -16,13 +16,10 @@
  * limitations under the License.
  */
 
-package org.apache.flink.api.java.type.lambdas;
+package org.apache.flink.api.java.typeutils;
 
 import org.apache.flink.api.common.functions.CoGroupFunction;
-import org.apache.flink.api.common.functions.CrossFunction;
-import org.apache.flink.api.common.functions.FlatJoinFunction;
 import org.apache.flink.api.common.functions.FlatMapFunction;
-import org.apache.flink.api.common.functions.GroupReduceFunction;
 import org.apache.flink.api.common.functions.InvalidTypesException;
 import org.apache.flink.api.common.functions.JoinFunction;
 import org.apache.flink.api.common.functions.MapFunction;
@@ -36,12 +33,7 @@
 import org.apache.flink.api.java.functions.KeySelector;
 import org.apache.flink.api.java.tuple.Tuple1;
 import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.api.java.typeutils.MissingTypeInfo;
-import org.apache.flink.api.java.typeutils.TupleTypeInfo;
-import org.apache.flink.api.java.typeutils.TypeExtractionUtils;
-import org.apache.flink.api.java.typeutils.TypeExtractor;
 
-import org.junit.Assert;
 import org.junit.Test;
 
 import java.lang.reflect.Method;
@@ -50,12 +42,12 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertTrue;
 
 /**
- * Tests the type extractor for lambda functions.
+ * Tests the type extractor for lambda functions. Many tests only work if the compiler supports
+ * lambdas properly; otherwise a MissingTypeInfo is returned.
  */
-@SuppressWarnings("serial")
 public class LambdaExtractionTest {
 
 	private static final TypeInformation<Tuple2<Tuple1<Integer>, Boolean>> NESTED_TUPLE_BOOLEAN_TYPE =
@@ -65,48 +57,43 @@
 			new TypeHint<Tuple2<Tuple1<Integer>, Double>>(){}.getTypeInfo();
 
 	@Test
-	public void testIdentifyLambdas() {
-		try {
-			MapFunction<?, ?> anonymousFromInterface = new MapFunction<String, Integer>() {
-				@Override
-				public Integer map(String value) {
-					return Integer.parseInt(value);
-				}
-			};
-
-			MapFunction<?, ?> anonymousFromClass = new RichMapFunction<String, Integer>() {
-				@Override
-				public Integer map(String value) {
-					return Integer.parseInt(value);
-				}
-			};
-
-			MapFunction<?, ?> fromProperClass = new StaticMapper();
-
-			MapFunction<?, ?> fromDerived = new ToTuple<Integer>() {
-				@Override
-				public Tuple2<Integer, Long> map(Integer value) {
-					return new Tuple2<>(value, 1L);
-				}
-			};
-
-			MapFunction<String, Integer> staticLambda = Integer::parseInt;
-			MapFunction<Integer, String> instanceLambda = Object::toString;
-			MapFunction<String, Integer> constructorLambda = Integer::new;
-
-			assertNull(checkAndExtractLambda(anonymousFromInterface));
-			assertNull(checkAndExtractLambda(anonymousFromClass));
-			assertNull(checkAndExtractLambda(fromProperClass));
-			assertNull(checkAndExtractLambda(fromDerived));
-			assertNotNull(checkAndExtractLambda(staticLambda));
-			assertNotNull(checkAndExtractLambda(instanceLambda));
-			assertNotNull(checkAndExtractLambda(constructorLambda));
-			assertNotNull(checkAndExtractLambda(STATIC_LAMBDA));
-		}
-		catch (Exception e) {
-			e.printStackTrace();
-			fail(e.getMessage());
-		}
+	@SuppressWarnings({"Convert2Lambda", "Anonymous2MethodRef"})
+	public void testIdentifyLambdas() throws TypeExtractionException {
+		MapFunction<?, ?> anonymousFromInterface = new MapFunction<String, Integer>() {
+			@Override
+			public Integer map(String value) {
+				return Integer.parseInt(value);
+			}
+		};
+
+		MapFunction<?, ?> anonymousFromClass = new RichMapFunction<String, Integer>() {
+			@Override
+			public Integer map(String value) {
+				return Integer.parseInt(value);
+			}
+		};
+
+		MapFunction<?, ?> fromProperClass = new StaticMapper();
+
+		MapFunction<?, ?> fromDerived = new ToTuple<Integer>() {
+			@Override
+			public Tuple2<Integer, Long> map(Integer value) {
+				return new Tuple2<>(value, 1L);
+			}
+		};
+
+		MapFunction<String, Integer> staticLambda = Integer::parseInt;
+		MapFunction<Integer, String> instanceLambda = Object::toString;
+		MapFunction<String, Integer> constructorLambda = Integer::new;
+
+		assertNull(checkAndExtractLambda(anonymousFromInterface));
+		assertNull(checkAndExtractLambda(anonymousFromClass));
+		assertNull(checkAndExtractLambda(fromProperClass));
+		assertNull(checkAndExtractLambda(fromDerived));
+		assertNotNull(checkAndExtractLambda(staticLambda));
+		assertNotNull(checkAndExtractLambda(instanceLambda));
+		assertNotNull(checkAndExtractLambda(constructorLambda));
+		assertNotNull(checkAndExtractLambda(STATIC_LAMBDA));
 	}
 
 	private static class StaticMapper implements MapFunction<String, Integer> {
@@ -134,7 +121,7 @@ public Integer map(String value) {
 	@Test
 	public void testLambdaWithMemberVariable() {
 		TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(new MyClass().getMapFunction(), Types.INT);
-		Assert.assertEquals(ti, BasicTypeInfo.STRING_TYPE_INFO);
+		assertEquals(ti, BasicTypeInfo.STRING_TYPE_INFO);
 	}
 
 	@Test
@@ -146,32 +133,41 @@ public void testLambdaWithLocalVariable() {
 		MapFunction<Integer, String> f = (i) -> s + k + j;
 
 		TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(f, Types.INT);
-		Assert.assertEquals(ti, BasicTypeInfo.STRING_TYPE_INFO);
+		assertEquals(ti, BasicTypeInfo.STRING_TYPE_INFO);
+	}
+
+	@Test
+	public void testLambdaWithNonGenericResultType() {
+		MapFunction<Tuple2<Tuple1<Integer>, Boolean>, Boolean> f = (i) -> null;
+
+		TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, null, true);
+		assertTrue(ti instanceof BasicTypeInfo);
+		assertEquals(BasicTypeInfo.BOOLEAN_TYPE_INFO, ti);
 	}
 
 	@Test
 	public void testMapLambda() {
 		MapFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, String>> f = (i) -> null;
 
-		TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE);
+		TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, null, true);
 		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
+			assertTrue(ti.isTupleType());
+			assertEquals(2, ti.getArity());
+			assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
+			assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
 		}
 	}
 
 	@Test
 	public void testFlatMapLambda() {
-		FlatMapFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, String>> f = (i, o) -> {};
+		FlatMapFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, String>> f = (i, out) -> out.collect(null);
 
-		TypeInformation<?> ti = TypeExtractor.getFlatMapReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE);
+		TypeInformation<?> ti = TypeExtractor.getFlatMapReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, null, true);
 		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
+			assertTrue(ti.isTupleType());
+			assertEquals(2, ti.getArity());
+			assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
+			assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
 		}
 	}
 
@@ -179,38 +175,12 @@ public void testFlatMapLambda() {
 	public void testMapPartitionLambda() {
 		MapPartitionFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, String>> f = (i, o) -> {};
 
-		TypeInformation<?> ti = TypeExtractor.getMapPartitionReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE);
-		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
-		}
-	}
-
-	@Test
-	public void testGroupReduceLambda() {
-		GroupReduceFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, String>> f = (i, o) -> {};
-
-		TypeInformation<?> ti = TypeExtractor.getGroupReduceReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE);
-		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
-		}
-	}
-
-	@Test
-	public void testFlatJoinLambda() {
-		FlatJoinFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, Double>, Tuple2<Tuple1<Integer>, String>> f = (i1, i2, o) -> {};
-
-		TypeInformation<?> ti = TypeExtractor.getFlatJoinReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, NESTED_TUPLE_DOUBLE_TYPE);
+		TypeInformation<?> ti = TypeExtractor.getMapPartitionReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, null, true);
 		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
+			assertTrue(ti.isTupleType());
+			assertEquals(2, ti.getArity());
+			assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
+			assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
 		}
 	}
 
@@ -218,12 +188,12 @@ public void testFlatJoinLambda() {
 	public void testJoinLambda() {
 		JoinFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, Double>, Tuple2<Tuple1<Integer>, String>> f = (i1, i2) -> null;
 
-		TypeInformation<?> ti = TypeExtractor.getJoinReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, NESTED_TUPLE_DOUBLE_TYPE);
+		TypeInformation<?> ti = TypeExtractor.getJoinReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, NESTED_TUPLE_DOUBLE_TYPE, null, true);
 		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
+			assertTrue(ti.isTupleType());
+			assertEquals(2, ti.getArity());
+			assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
+			assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
 		}
 	}
 
@@ -231,25 +201,12 @@ public void testJoinLambda() {
 	public void testCoGroupLambda() {
 		CoGroupFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, Double>, Tuple2<Tuple1<Integer>, String>> f = (i1, i2, o) -> {};
 
-		TypeInformation<?> ti = TypeExtractor.getCoGroupReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, NESTED_TUPLE_DOUBLE_TYPE);
-		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
-		}
-	}
-
-	@Test
-	public void testCrossLambda() {
-		CrossFunction<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, Double>, Tuple2<Tuple1<Integer>, String>> f = (i1, i2) -> null;
-
-		TypeInformation<?> ti = TypeExtractor.getCrossReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, NESTED_TUPLE_DOUBLE_TYPE);
+		TypeInformation<?> ti = TypeExtractor.getCoGroupReturnTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, NESTED_TUPLE_DOUBLE_TYPE, null, true);
 		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
+			assertTrue(ti.isTupleType());
+			assertEquals(2, ti.getArity());
+			assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
+			assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
 		}
 	}
 
@@ -257,12 +214,12 @@ public void testCrossLambda() {
 	public void testKeySelectorLambda() {
 		KeySelector<Tuple2<Tuple1<Integer>, Boolean>, Tuple2<Tuple1<Integer>, String>> f = (i) -> null;
 
-		TypeInformation<?> ti = TypeExtractor.getKeySelectorTypes(f, NESTED_TUPLE_BOOLEAN_TYPE);
+		TypeInformation<?> ti = TypeExtractor.getKeySelectorTypes(f, NESTED_TUPLE_BOOLEAN_TYPE, null, true);
 		if (!(ti instanceof MissingTypeInfo)) {
-			Assert.assertTrue(ti.isTupleType());
-			Assert.assertEquals(2, ti.getArity());
-			Assert.assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
-			Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
+			assertTrue(ti.isTupleType());
+			assertEquals(2, ti.getArity());
+			assertTrue(((TupleTypeInfo<?>) ti).getTypeAt(0).isTupleType());
+			assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
 		}
 	}
 
@@ -271,19 +228,20 @@ public void testKeySelectorLambda() {
 	public void testLambdaTypeErasure() {
 		MapFunction<Tuple1<Integer>, Tuple1> f = (i) -> null;
 		TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(f, new TypeHint<Tuple1<Integer>>(){}.getTypeInfo(), null, true);
-		Assert.assertTrue(ti instanceof MissingTypeInfo);
+		assertTrue(ti instanceof MissingTypeInfo);
 	}
 
 	@Test
 	public void testPartitionerLambda() {
 		Partitioner<Tuple2<Integer, String>> partitioner = (key, numPartitions) -> key.f1.length() % numPartitions;
-		final TypeInformation<?> ti = TypeExtractor.getPartitionerTypes(partitioner);
-
-		Assert.assertTrue(ti.isTupleType());
-		Assert.assertEquals(2, ti.getArity());
-		Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(0), BasicTypeInfo.INT_TYPE_INFO);
-		Assert.assertEquals(((TupleTypeInfo<?>) ti).getTypeAt(1), BasicTypeInfo.STRING_TYPE_INFO);
+		final TypeInformation<?> ti = TypeExtractor.getPartitionerTypes(partitioner, null, true);
 

  (This diff was longer than 20,000 lines, and has been truncated...)


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services