Posted to commits@flink.apache.org by rm...@apache.org on 2017/01/18 14:05:00 UTC

[38/39] flink-web git commit: Rebuild site

http://git-wip-us.apache.org/repos/asf/flink-web/blob/9ec0a879/content/blog/feed.xml
----------------------------------------------------------------------
diff --git a/content/blog/feed.xml b/content/blog/feed.xml
new file mode 100644
index 0000000..066ae2f
--- /dev/null
+++ b/content/blog/feed.xml
@@ -0,0 +1,6041 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
+<channel>
+<title>Flink Blog Feed</title>
+<description>Flink Blog</description>
+<link>http://flink.apache.org/blog</link>
+<atom:link href="http://flink.apache.org/blog/feed.xml" rel="self" type="application/rss+xml" />
+
+<item>
+<title>Apache Flink 1.1.4 Released</title>
+<description>&lt;p&gt;The Apache Flink community released the next bugfix version of the Apache Flink 1.1 series.&lt;/p&gt;
+
+&lt;p&gt;This release includes major robustness improvements for checkpoint cleanup on failures and consumption of intermediate streams. We highly recommend that all users upgrade to Flink 1.1.4.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-xml&quot;&gt;&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-java&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.4&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-streaming-java_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.4&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-clients_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.4&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;You can find the binaries on the updated &lt;a href=&quot;http://flink.apache.org/downloads.html&quot;&gt;Downloads page&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;note-for-rocksdb-backend-users&quot;&gt;Note for RocksDB Backend Users&lt;/h2&gt;
+
+&lt;p&gt;We updated Flink’s RocksDB dependency version from &lt;code&gt;4.5.1&lt;/code&gt; to &lt;code&gt;4.11.2&lt;/code&gt;. Between these versions, some of RocksDB’s internal configuration defaults changed in ways that affect the memory footprint of running Flink with RocksDB. We therefore manually reset them to the previous defaults. If you want to run with the new RocksDB 4.11.2 defaults, you can do so via:&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;RocksDBStateBackend&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;backend&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;k&quot;&gt;new&lt;/span&gt; &lt;span class=&quot;nf&quot;&gt;RocksDBStateBackend&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;s&quot;&gt;&amp;quot;...&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;);&lt;/span&gt;
+&lt;span class=&quot;c1&quot;&gt;// Use the new default options. Otherwise, the default for RocksDB 4.5.1&lt;/span&gt;
+&lt;span class=&quot;c1&quot;&gt;// `PredefinedOptions.DEFAULT_ROCKS_4_5_1` will be used.&lt;/span&gt;
+&lt;span class=&quot;n&quot;&gt;backend&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;setPredefinedOptions&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;PredefinedOptions&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;DEFAULT&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;);&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
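+
+&lt;p&gt;To complete the picture, the backend then has to be registered with the execution environment. The following is a minimal sketch only: the checkpoint URI is a placeholder, and we assume the standard &lt;code&gt;StreamExecutionEnvironment&lt;/code&gt; setup.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;import org.apache.flink.contrib.streaming.state.PredefinedOptions;
+import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+
+StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+// Placeholder checkpoint URI; the constructor may throw an IOException.
+RocksDBStateBackend backend = new RocksDBStateBackend(&amp;quot;hdfs:///flink/checkpoints&amp;quot;);
+
+// Opt in to the new RocksDB 4.11.2 defaults, as described above.
+backend.setPredefinedOptions(PredefinedOptions.DEFAULT);
+
+// Register the backend so all stateful operators of this job use it.
+env.setStateBackend(backend);
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;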
+
+&lt;h2 id=&quot;release-notes---flink---version-114&quot;&gt;Release Notes - Flink - Version 1.1.4&lt;/h2&gt;
+
+&lt;h3 id=&quot;sub-task&quot;&gt;Sub-task&lt;/h3&gt;
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4510&quot;&gt;FLINK-4510&lt;/a&gt;] -         Always create CheckpointCoordinator
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4984&quot;&gt;FLINK-4984&lt;/a&gt;] -         Add Cancellation Barriers to BarrierTracker and BarrierBuffer
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4985&quot;&gt;FLINK-4985&lt;/a&gt;] -         Report Declined/Canceled Checkpoints to Checkpoint Coordinator
+&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;h3 id=&quot;bug&quot;&gt;Bug&lt;/h3&gt;
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-2662&quot;&gt;FLINK-2662&lt;/a&gt;] -         CompilerException: &amp;quot;Bug: Plan generation for Unions picked a ship strategy between binary plan operators.&amp;quot;
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-3680&quot;&gt;FLINK-3680&lt;/a&gt;] -         Remove or improve (not set) text in the Job Plan UI
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-3813&quot;&gt;FLINK-3813&lt;/a&gt;] -         YARNSessionFIFOITCase.testDetachedMode failed on Travis
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4108&quot;&gt;FLINK-4108&lt;/a&gt;] -         NPE in Row.productArity
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4506&quot;&gt;FLINK-4506&lt;/a&gt;] -         CsvOutputFormat defaults allowNullValues to false, even though doc and declaration says true
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4581&quot;&gt;FLINK-4581&lt;/a&gt;] -         Table API throws &amp;quot;No suitable driver found for jdbc:calcite&amp;quot;
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4586&quot;&gt;FLINK-4586&lt;/a&gt;] -         NumberSequenceIterator and Accumulator threading issue
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4619&quot;&gt;FLINK-4619&lt;/a&gt;] -         JobManager does not answer to client when restore from savepoint fails
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4727&quot;&gt;FLINK-4727&lt;/a&gt;] -         Kafka 0.9 Consumer should also checkpoint auto retrieved offsets even when no data is read
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4862&quot;&gt;FLINK-4862&lt;/a&gt;] -         NPE on EventTimeSessionWindows with ContinuousEventTimeTrigger
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4932&quot;&gt;FLINK-4932&lt;/a&gt;] -         Don&amp;#39;t let ExecutionGraph fail when in state Restarting
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4933&quot;&gt;FLINK-4933&lt;/a&gt;] -         ExecutionGraph.scheduleOrUpdateConsumers can fail the ExecutionGraph
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4977&quot;&gt;FLINK-4977&lt;/a&gt;] -         Enum serialization does not work in all cases
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4991&quot;&gt;FLINK-4991&lt;/a&gt;] -         TestTask hangs in testWatchDogInterruptsTask
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4998&quot;&gt;FLINK-4998&lt;/a&gt;] -         ResourceManager fails when num task slots &amp;gt; Yarn vcores
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5013&quot;&gt;FLINK-5013&lt;/a&gt;] -         Flink Kinesis connector doesn&amp;#39;t work on old EMR versions
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5028&quot;&gt;FLINK-5028&lt;/a&gt;] -         Stream Tasks must not go through clean shutdown logic on cancellation
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5038&quot;&gt;FLINK-5038&lt;/a&gt;] -         Errors in the &amp;quot;cancelTask&amp;quot; method prevent closeables from being closed early
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5039&quot;&gt;FLINK-5039&lt;/a&gt;] -         Avro GenericRecord support is broken
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5040&quot;&gt;FLINK-5040&lt;/a&gt;] -         Set correct input channel types with eager scheduling
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5050&quot;&gt;FLINK-5050&lt;/a&gt;] -         JSON.org license is CatX
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5057&quot;&gt;FLINK-5057&lt;/a&gt;] -         Cancellation timeouts are picked from wrong config
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5058&quot;&gt;FLINK-5058&lt;/a&gt;] -         taskManagerMemory attribute set wrong value in FlinkShell
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5063&quot;&gt;FLINK-5063&lt;/a&gt;] -         State handles are not properly cleaned up for declined or expired checkpoints
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5073&quot;&gt;FLINK-5073&lt;/a&gt;] -         ZooKeeperCompleteCheckpointStore executes blocking delete operation in ZooKeeper client thread
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5075&quot;&gt;FLINK-5075&lt;/a&gt;] -         Kinesis consumer incorrectly determines shards as newly discovered when tested against Kinesalite
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5082&quot;&gt;FLINK-5082&lt;/a&gt;] -         Pull ExecutionService lifecycle management out of the JobManager
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5085&quot;&gt;FLINK-5085&lt;/a&gt;] -         Execute CheckpointCoodinator&amp;#39;s state discard calls asynchronously
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5114&quot;&gt;FLINK-5114&lt;/a&gt;] -         PartitionState update with finished execution fails
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5142&quot;&gt;FLINK-5142&lt;/a&gt;] -         Resource leak in CheckpointCoordinator
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5149&quot;&gt;FLINK-5149&lt;/a&gt;] -         ContinuousEventTimeTrigger doesn&amp;#39;t fire at the end of the window
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5154&quot;&gt;FLINK-5154&lt;/a&gt;] -         Duplicate TypeSerializer when writing RocksDB Snapshot
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5158&quot;&gt;FLINK-5158&lt;/a&gt;] -         Handle ZooKeeperCompletedCheckpointStore exceptions in CheckpointCoordinator
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5172&quot;&gt;FLINK-5172&lt;/a&gt;] -         In RocksDBStateBackend, set flink-core and flink-streaming-java to &amp;quot;provided&amp;quot;
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5173&quot;&gt;FLINK-5173&lt;/a&gt;] -         Upgrade RocksDB dependency
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5184&quot;&gt;FLINK-5184&lt;/a&gt;] -         Error result of compareSerialized in RowComparator class
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5193&quot;&gt;FLINK-5193&lt;/a&gt;] -         Recovering all jobs fails completely if a single recovery fails
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5197&quot;&gt;FLINK-5197&lt;/a&gt;] -         Late JobStatusChanged messages can interfere with running jobs
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5214&quot;&gt;FLINK-5214&lt;/a&gt;] -         Clean up checkpoint files when failing checkpoint operation on TM
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5215&quot;&gt;FLINK-5215&lt;/a&gt;] -         Close checkpoint streams upon cancellation
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5216&quot;&gt;FLINK-5216&lt;/a&gt;] -         CheckpointCoordinator&amp;#39;s &amp;#39;minPauseBetweenCheckpoints&amp;#39; refers to checkpoint start rather then checkpoint completion
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5218&quot;&gt;FLINK-5218&lt;/a&gt;] -         Eagerly close checkpoint streams on cancellation
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5228&quot;&gt;FLINK-5228&lt;/a&gt;] -         LocalInputChannel re-trigger request and release deadlock
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5229&quot;&gt;FLINK-5229&lt;/a&gt;] -         Cleanup StreamTaskStates if a checkpoint operation of a subsequent operator fails 
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5246&quot;&gt;FLINK-5246&lt;/a&gt;] -         Don&amp;#39;t discard unknown checkpoint messages in the CheckpointCoordinator
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5248&quot;&gt;FLINK-5248&lt;/a&gt;] -         SavepointITCase doesn&amp;#39;t catch savepoint restore failure
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5274&quot;&gt;FLINK-5274&lt;/a&gt;] -         LocalInputChannel throws NPE if partition reader is released
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5275&quot;&gt;FLINK-5275&lt;/a&gt;] -         InputChanelDeploymentDescriptors throws misleading Exception if producer failed/cancelled
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5276&quot;&gt;FLINK-5276&lt;/a&gt;] -         ExecutionVertex archiving can throw NPE with many previous attempts
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5285&quot;&gt;FLINK-5285&lt;/a&gt;] -         CancelCheckpointMarker flood when using at least once mode
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5326&quot;&gt;FLINK-5326&lt;/a&gt;] -         IllegalStateException: Bug in Netty consumer logic: reader queue got notified by partition about available data,  but none was available
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5352&quot;&gt;FLINK-5352&lt;/a&gt;] -         Restore RocksDB 1.1.3 memory behavior
+&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;h3 id=&quot;improvement&quot;&gt;Improvement&lt;/h3&gt;
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-3347&quot;&gt;FLINK-3347&lt;/a&gt;] -         TaskManager (or its ActorSystem) need to restart in case they notice quarantine
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-3787&quot;&gt;FLINK-3787&lt;/a&gt;] -         Yarn client does not report unfulfillable container constraints
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4445&quot;&gt;FLINK-4445&lt;/a&gt;] -         Ignore unmatched state when restoring from savepoint
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4715&quot;&gt;FLINK-4715&lt;/a&gt;] -         TaskManager should commit suicide after cancellation failure
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4894&quot;&gt;FLINK-4894&lt;/a&gt;] -         Don&amp;#39;t block on buffer request after broadcastEvent 
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4975&quot;&gt;FLINK-4975&lt;/a&gt;] -         Add a limit for how much data may be buffered during checkpoint alignment
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4996&quot;&gt;FLINK-4996&lt;/a&gt;] -         Make CrossHint @Public
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5046&quot;&gt;FLINK-5046&lt;/a&gt;] -         Avoid redundant serialization when creating the TaskDeploymentDescriptor
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5123&quot;&gt;FLINK-5123&lt;/a&gt;] -         Add description how to do proper shading to Flink docs.
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5169&quot;&gt;FLINK-5169&lt;/a&gt;] -         Make consumption of input channels fair
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5192&quot;&gt;FLINK-5192&lt;/a&gt;] -         Provide better log config templates
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5194&quot;&gt;FLINK-5194&lt;/a&gt;] -         Log heartbeats on TRACE level
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5196&quot;&gt;FLINK-5196&lt;/a&gt;] -         Don&amp;#39;t log InputChannelDescriptor
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5198&quot;&gt;FLINK-5198&lt;/a&gt;] -         Overwrite TaskState toString
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5199&quot;&gt;FLINK-5199&lt;/a&gt;] -         Improve logging of submitted job graph actions in HA case
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5201&quot;&gt;FLINK-5201&lt;/a&gt;] -         Promote loaded config properties to INFO
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5207&quot;&gt;FLINK-5207&lt;/a&gt;] -         Decrease HadoopFileSystem logging
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5249&quot;&gt;FLINK-5249&lt;/a&gt;] -         description of datastream rescaling doesn&amp;#39;t match the figure
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5259&quot;&gt;FLINK-5259&lt;/a&gt;] -         wrong execution environment in retry delays example
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-5278&quot;&gt;FLINK-5278&lt;/a&gt;] -         Improve Task and checkpoint logging 
+&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;h3 id=&quot;new-feature&quot;&gt;New Feature&lt;/h3&gt;
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4976&quot;&gt;FLINK-4976&lt;/a&gt;] -         Add a way to abort in flight checkpoints
+&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;h3 id=&quot;task&quot;&gt;Task&lt;/h3&gt;
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4778&quot;&gt;FLINK-4778&lt;/a&gt;] -         Update program example in /docs/setup/cli.md due to the change in FLINK-2021
+&lt;/li&gt;
+&lt;/ul&gt;
+
+</description>
+<pubDate>Wed, 21 Dec 2016 10:00:00 +0100</pubDate>
+<link>http://flink.apache.org/news/2016/12/21/release-1.1.4.html</link>
+<guid isPermaLink="true">/news/2016/12/21/release-1.1.4.html</guid>
+</item>
+
+<item>
+<title>Apache Flink in 2016: Year in Review</title>
+<description>&lt;p&gt;2016 was an exciting year for the Apache Flink® community, and the
+  &lt;a href=&quot;http://flink.apache.org/news/2016/03/08/release-1.0.0.html&quot; target=&quot;_blank&quot;&gt;release of Flink 1.0 in March&lt;/a&gt;
+   marked the first time in Flink’s history that the community guaranteed API backward compatibility for all
+   versions in a series. This step forward for Flink was followed by many new and exciting production deployments
+   in organizations of all shapes and sizes, all around the globe.&lt;/p&gt;
+
+&lt;p&gt;In this post, we’ll look back on the project’s progress over the course of 2016, and
+we’ll also preview what 2017 has in store.&lt;/p&gt;
+
+&lt;div class=&quot;page-toc&quot;&gt;
+&lt;ul id=&quot;markdown-toc&quot;&gt;
+  &lt;li&gt;&lt;a href=&quot;#community-growth&quot; id=&quot;markdown-toc-community-growth&quot;&gt;Community Growth&lt;/a&gt;    &lt;ul&gt;
+      &lt;li&gt;&lt;a href=&quot;#github&quot; id=&quot;markdown-toc-github&quot;&gt;Github&lt;/a&gt;&lt;/li&gt;
+      &lt;li&gt;&lt;a href=&quot;#meetups&quot; id=&quot;markdown-toc-meetups&quot;&gt;Meetups&lt;/a&gt;&lt;/li&gt;
+    &lt;/ul&gt;
+  &lt;/li&gt;
+  &lt;li&gt;&lt;a href=&quot;#flink-forward-2016&quot; id=&quot;markdown-toc-flink-forward-2016&quot;&gt;Flink Forward 2016&lt;/a&gt;&lt;/li&gt;
+  &lt;li&gt;&lt;a href=&quot;#features-and-ecosystem&quot; id=&quot;markdown-toc-features-and-ecosystem&quot;&gt;Features and Ecosystem&lt;/a&gt;    &lt;ul&gt;
+      &lt;li&gt;&lt;a href=&quot;#flink-ecosystem-growth&quot; id=&quot;markdown-toc-flink-ecosystem-growth&quot;&gt;Flink Ecosystem Growth&lt;/a&gt;&lt;/li&gt;
+      &lt;li&gt;&lt;a href=&quot;#feature-timeline-in-2016&quot; id=&quot;markdown-toc-feature-timeline-in-2016&quot;&gt;Feature Timeline in 2016&lt;/a&gt;&lt;/li&gt;
+    &lt;/ul&gt;
+  &lt;/li&gt;
+  &lt;li&gt;&lt;a href=&quot;#looking-ahead-to-2017&quot; id=&quot;markdown-toc-looking-ahead-to-2017&quot;&gt;Looking ahead to 2017&lt;/a&gt;&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;/div&gt;
+
+&lt;h2 id=&quot;community-growth&quot;&gt;Community Growth&lt;/h2&gt;
+
+&lt;h3 id=&quot;github&quot;&gt;Github&lt;/h3&gt;
+&lt;p&gt;First, here’s a summary of community statistics from &lt;a href=&quot;https://github.com/apache/flink&quot; target=&quot;_blank&quot;&gt;GitHub&lt;/a&gt;. At the time of writing:&lt;/p&gt;
+&lt;ul&gt;
+  &lt;li&gt;&lt;b&gt;Contributors&lt;/b&gt; have increased from 150 in December 2015 to 258 in December 2016 (up &lt;b&gt;72%&lt;/b&gt;)&lt;/li&gt;
+  &lt;li&gt;&lt;b&gt;Stars&lt;/b&gt; have increased from 813 in December 2015 to 1830 in December 2016 (up &lt;b&gt;125%&lt;/b&gt;)&lt;/li&gt;
+  &lt;li&gt;&lt;b&gt;Forks&lt;/b&gt; have increased from 544 in December 2015 to 1255 in December 2016 (up &lt;b&gt;130%&lt;/b&gt;)&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;p&gt;The community also welcomed &lt;b&gt;3 new committers in 2016&lt;/b&gt;: Chengxiang Li, Greg Hogan, and Tzu-Li (Gordon) Tai.&lt;/p&gt;
+
+&lt;p&gt;&lt;br /&gt;&lt;img src=&quot;/img/blog/github-stats-2016.png&quot; width=&quot;775&quot; alt=&quot;Apache Flink GitHub Stats&quot; /&gt;
+&lt;br /&gt;
+&lt;br /&gt;&lt;/p&gt;
+
+&lt;p&gt;Next, let’s take a look at a few other project stats, starting with the number of commits. If we run:&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code&gt;git log --pretty=oneline --after=12/31/2015 | wc -l
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+&lt;p&gt;…inside the Flink repository, we’ll see a total of &lt;strong&gt;1884&lt;/strong&gt; commits so far in 2016, bringing the all-time total commits to &lt;strong&gt;10,015&lt;/strong&gt;.&lt;/p&gt;
+
+&lt;p&gt;Now, let’s go a bit deeper. Here are instructions in case you’d like to take a look at this data yourself.&lt;/p&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;Download gitstats from the &lt;a href=&quot;http://gitstats.sourceforge.net/&quot;&gt;project homepage&lt;/a&gt;. Or, on OS X with homebrew, type:&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code&gt;brew install --HEAD homebrew/head-only/gitstats
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;Clone the Apache Flink git repository:&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code&gt;git clone git@github.com:apache/flink.git
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;Generate the statistics&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code&gt;gitstats flink/ flink-stats/
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;View all the statistics as an HTML page using your default browser:&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code&gt;open flink-stats/index.html
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+&lt;p&gt;2016 is the year that Flink surpassed 1 million lines of code, now clocking in at &lt;strong&gt;1,034,137&lt;/strong&gt; lines.&lt;/p&gt;
+
+&lt;p&gt;&lt;img src=&quot;/img/blog/flink-lines-of-code-2016.png&quot; align=&quot;center&quot; width=&quot;550&quot; alt=&quot;Flink Total Lines of Code&quot; /&gt;&lt;/p&gt;
+
+&lt;p&gt;Monday remains the day of the week with the most commits over the project’s history:&lt;/p&gt;
+
+&lt;p&gt;&lt;img src=&quot;/img/blog/flink-dow-2016.png&quot; align=&quot;center&quot; width=&quot;550&quot; alt=&quot;Flink Commits by Day of Week&quot; /&gt;&lt;/p&gt;
+
+&lt;p&gt;And 5pm is still solidly the preferred commit time:&lt;/p&gt;
+
+&lt;p&gt;&lt;img src=&quot;/img/blog/flink-hod-2016.png&quot; align=&quot;center&quot; width=&quot;550&quot; alt=&quot;Flink Commits by Hour of Day&quot; /&gt;&lt;/p&gt;
+
+&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
+
+&lt;h3 id=&quot;meetups&quot;&gt;Meetups&lt;/h3&gt;
+&lt;p&gt;&lt;a href=&quot;https://www.meetup.com/topics/apache-flink/&quot; target=&quot;_blank&quot;&gt;Apache Flink Meetup membership&lt;/a&gt; grew by &lt;b&gt;240%&lt;/b&gt;
+this year, and at the time of writing, there are 41 meetups comprising 16,541 members that list Flink as a topic – up from 16 groups with 4,864 members in December 2015.
+The Flink community is proud to be truly global in nature.&lt;/p&gt;
+
+&lt;p&gt;&lt;img src=&quot;/img/blog/flink-meetups-dec2016.png&quot; width=&quot;775&quot; alt=&quot;Apache Flink Meetup Map&quot; /&gt;&lt;/p&gt;
+
+&lt;h2 id=&quot;flink-forward-2016&quot;&gt;Flink Forward 2016&lt;/h2&gt;
+
+&lt;p&gt;The &lt;a href=&quot;http://2016.flink-forward.org/&quot; target=&quot;_blank&quot;&gt;second annual Flink Forward conference&lt;/a&gt; took place in
+Berlin on September 12-14, and over 350 members of the Flink community came together for speaker sessions, training,
+and discussion about Flink. &lt;a href=&quot;http://2016.flink-forward.org/program/sessions/&quot; target=&quot;_blank&quot;&gt;Slides and videos&lt;/a&gt;
+from speaker sessions are available online, and we encourage you to take a look if you’re interested in learning more
+about how Flink is used in production in a wide range of organizations.&lt;/p&gt;
+
+&lt;p&gt;Flink Forward will be expanding to &lt;a href=&quot;http://sf.flink-forward.org/&quot; target=&quot;_blank&quot;&gt;San Francisco in April 2017&lt;/a&gt;, and the &lt;a href=&quot;http://berlin.flink-forward.org/&quot; target=&quot;_blank&quot;&gt;third annual Berlin event
+is scheduled for September 2017&lt;/a&gt;.&lt;/p&gt;
+
+&lt;p&gt;&lt;img src=&quot;/img/blog/speaker-logos-ff2016.png&quot; width=&quot;775&quot; alt=&quot;Flink Forward Speakers&quot; /&gt;&lt;/p&gt;
+
+&lt;h2 id=&quot;features-and-ecosystem&quot;&gt;Features and Ecosystem&lt;/h2&gt;
+
+&lt;h3 id=&quot;flink-ecosystem-growth&quot;&gt;Flink Ecosystem Growth&lt;/h3&gt;
+
+&lt;p&gt;Flink was added to a selection of distributions during 2016, making it easier
+for an even larger base of users to start working with Flink:&lt;/p&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;&lt;a href=&quot;https://aws.amazon.com/blogs/big-data/use-apache-flink-on-amazon-emr/&quot; target=&quot;_blank&quot;&gt;
+    Amazon EMR&lt;/a&gt;&lt;/li&gt;
+  &lt;li&gt;&lt;a href=&quot;https://cloud.google.com/dataproc/docs/release-notes/service#november_29_2016&quot; target=&quot;_blank&quot;&gt;
+    Google Cloud Dataproc&lt;/a&gt;&lt;/li&gt;
+  &lt;li&gt;&lt;a href=&quot;https://www.lightbend.com/blog/introducing-lightbend-fast-data-platform&quot; target=&quot;_blank&quot;&gt;
+    Lightbend Fast Data Platform&lt;/a&gt;&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;p&gt;In addition, the Apache Beam and Flink communities teamed up to build a Flink runner for Beam that, according to the Google team, is &lt;a href=&quot;https://cloud.google.com/blog/big-data/2016/05/why-apache-beam-a-google-perspective&quot; target=&quot;_blank&quot;&gt;“sophisticated enough to be a compelling alternative to Cloud Dataflow when running on premise or on non-Google clouds”&lt;/a&gt;.&lt;/p&gt;
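+
+&lt;p&gt;As a rough illustration of what running Beam on Flink looks like, here is a minimal pipeline skeleton. The class names follow the Beam Java SDK and its Flink runner module, but treat them as indicative rather than version-exact, since the APIs were evolving quickly at the time.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;import org.apache.beam.runners.flink.FlinkPipelineOptions;
+import org.apache.beam.runners.flink.FlinkRunner;
+import org.apache.beam.sdk.Pipeline;
+import org.apache.beam.sdk.options.PipelineOptionsFactory;
+
+// Select the Flink runner instead of Cloud Dataflow; the pipeline
+// definition itself stays runner-agnostic.
+FlinkPipelineOptions options = PipelineOptionsFactory.as(FlinkPipelineOptions.class);
+options.setRunner(FlinkRunner.class);
+
+Pipeline pipeline = Pipeline.create(options);
+// ... define sources and transforms as usual, then:
+pipeline.run();
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;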
+
+&lt;h3 id=&quot;feature-timeline-in-2016&quot;&gt;Feature Timeline in 2016&lt;/h3&gt;
+
+&lt;p&gt;Here’s a selection of major features added to Flink over the course of 2016:&lt;/p&gt;
+
+&lt;p&gt;&lt;img src=&quot;/img/blog/flink-releases-2016.png&quot; width=&quot;775&quot; alt=&quot;Flink Release Timeline 2016&quot; /&gt;&lt;/p&gt;
+
+&lt;p&gt;If you spend time in the &lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4554?jql=project%20%3D%20FLINK%20AND%20issuetype%20%3D%20%22New%20Feature%22%20AND%20status%20%3D%20Resolved%20ORDER%20BY%20resolved%20DESC&quot; target=&quot;_blank&quot;&gt;Apache Flink JIRA project&lt;/a&gt;, you’ll see that the Flink community has addressed every single one of the roadmap items identified
+in &lt;a href=&quot;http://flink.apache.org/news/2015/12/18/a-year-in-review.html&quot; target=&quot;_blank&quot;&gt;2015’s year in review post&lt;/a&gt;. Here’s to making that an annual tradition. :)&lt;/p&gt;
+
+&lt;h2 id=&quot;looking-ahead-to-2017&quot;&gt;Looking ahead to 2017&lt;/h2&gt;
+
+&lt;p&gt;A good source of information about the Flink community’s roadmap is the list of
+&lt;a href=&quot;https://cwiki.apache.org/confluence/display/FLINK/Flink+Improvement+Proposals&quot; target=&quot;_blank&quot;&gt;Flink
+Improvement Proposals (FLIPs)&lt;/a&gt; in the project wiki. Below, we’ll highlight a selection of FLIPs
+that have been accepted by the community as well as some that are still under discussion.&lt;/p&gt;
+
+&lt;p&gt;We should note that work is already underway on a number of these features, and some will even be included in Flink 1.2 at the beginning of 2017.&lt;/p&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;A new Flink deployment and process model&lt;/strong&gt;, as described in &lt;a href=&quot;https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65147077&quot; target=&quot;_blank&quot;&gt;FLIP-6&lt;/a&gt;. This work ensures that Flink supports a wide
+range of deployment types and cluster managers, making it possible to run Flink smoothly in any environment.&lt;/p&gt;
+  &lt;/li&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;Dynamic scaling&lt;/strong&gt; for both key-value state &lt;a href=&quot;https://github.com/apache/flink/pull/2440&quot; target=&quot;_blank&quot;&gt;(as described in
+this PR)&lt;/a&gt; &lt;em&gt;and&lt;/em&gt; non-partitioned state &lt;a href=&quot;https://cwiki.apache.org/confluence/display/FLINK/FLIP-8%3A+Rescalable+Non-Partitioned+State&quot; target=&quot;_blank&quot;&gt;(as described in FLIP-8)&lt;/a&gt;, ensuring that it’s always possible to split or merge state when scaling up or down, respectively.&lt;/p&gt;
+  &lt;/li&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;Asynchronous I/O&lt;/strong&gt;, as described in &lt;a href=&quot;https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65870673&quot; target=&quot;_blank&quot;&gt;FLIP-12&lt;/a&gt;, which makes I/O access a less time-consuming process without adding complexity or the need for extra checkpoint coordination (an illustrative sketch follows this list).&lt;/p&gt;
+  &lt;/li&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;Enhancements to the window evictor&lt;/strong&gt;, as described in &lt;a href=&quot;https://cwiki.apache.org/confluence/display/FLINK/FLIP-4+%3A+Enhance+Window+Evictor&quot; target=&quot;_blank&quot;&gt;FLIP-4&lt;/a&gt;,
+to provide users with more control over how elements are evicted from a window.&lt;/p&gt;
+  &lt;/li&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;Fine-grained recovery from task failures&lt;/strong&gt;, as described in &lt;a href=&quot;https://cwiki.apache.org/confluence/display/FLINK/FLIP-1+%3A+Fine+Grained+Recovery+from+Task+Failures&quot; target=&quot;_blank&quot;&gt;FLIP-1&lt;/a&gt;,
+to make it possible to restart only what needs to be restarted during recovery, building on cached intermediate results.&lt;/p&gt;
+  &lt;/li&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;Unified checkpoints and savepoints&lt;/strong&gt;, as described in &lt;a href=&quot;https://cwiki.apache.org/confluence/display/FLINK/FLIP-10%3A+Unify+Checkpoints+and+Savepoints&quot; target=&quot;_blank&quot;&gt;FLIP-10&lt;/a&gt;, to
+allow savepoints to be triggered automatically – important for program updates and error handling – because savepoints allow the user to modify both
+the job and the Flink version, whereas checkpoints can only be recovered with the same job.&lt;/p&gt;
+  &lt;/li&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;Table API window aggregations&lt;/strong&gt;, as described in &lt;a href=&quot;https://cwiki.apache.org/confluence/display/FLINK/FLIP-11%3A+Table+API+Stream+Aggregations&quot; target=&quot;_blank&quot;&gt;FLIP-11&lt;/a&gt;, to support group-window and row-window aggregates on streaming and batch tables.&lt;/p&gt;
+  &lt;/li&gt;
+  &lt;li&gt;
+    &lt;p&gt;&lt;strong&gt;Side inputs&lt;/strong&gt;, as described in &lt;a href=&quot;https://docs.google.com/document/d/1hIgxi2Zchww_5fWUHLoYiXwSBXjv-M5eOv-MKQYN3m4/edit&quot; target=&quot;_blank&quot;&gt;this design document&lt;/a&gt;, to
+enable the joining of a main, high-throughput stream with one or more inputs with static or slowly-changing data.&lt;/p&gt;
+  &lt;/li&gt;
+&lt;/ul&gt;
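+
+&lt;p&gt;To give a flavor of the asynchronous I/O proposal mentioned above, the sketch below shows the style of user function FLIP-12 discusses: a request is issued without blocking the operator thread, and the result is handed back through a callback. All names here are illustrative only (including the hypothetical non-blocking &lt;code&gt;AsyncDatabaseClient&lt;/code&gt;), since the API had not been released at the time of writing.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;// Illustrative sketch only: interface names follow the direction of
+// FLIP-12 and are not a released API.
+public class DatabaseLookup implements AsyncFunction&amp;lt;String, String&amp;gt; {
+
+    // Hypothetical non-blocking client returning a CompletableFuture.
+    private transient AsyncDatabaseClient client;
+
+    @Override
+    public void asyncInvoke(String key, AsyncCollector&amp;lt;String&amp;gt; collector) throws Exception {
+        // Issue the request without blocking; Flink keeps processing other
+        // records (and checkpointing) while the lookup is in flight.
+        client.lookup(key).thenAccept(value -&amp;gt;
+                collector.collect(java.util.Collections.singleton(value)));
+    }
+}
+&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;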
+
+&lt;p&gt;If you’re interested in getting involved with Flink, we encourage you to take a look at the FLIPs and to join the discussion via the &lt;a href=&quot;http://flink.apache.org/community.html#mailing-lists&quot;&gt;Flink mailing lists&lt;/a&gt;.&lt;/p&gt;
+
+&lt;p&gt;Lastly, we’d like to extend a sincere thank you to all of the Flink community for making 2016 a great year!&lt;/p&gt;
+</description>
+<pubDate>Mon, 19 Dec 2016 10:00:00 +0100</pubDate>
+<link>http://flink.apache.org/news/2016/12/19/2016-year-in-review.html</link>
+<guid isPermaLink="true">/news/2016/12/19/2016-year-in-review.html</guid>
+</item>
+
+<item>
+<title>Apache Flink 1.1.3 Released</title>
+<description>&lt;p&gt;The Apache Flink community released the next bugfix version of the Apache Flink 1.1 series.&lt;/p&gt;
+
+&lt;p&gt;We recommend that all users upgrade to Flink 1.1.3.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-xml&quot;&gt;&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-java&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.3&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-streaming-java_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.3&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-clients_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.3&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;You can find the binaries on the updated &lt;a href=&quot;http://flink.apache.org/downloads.html&quot;&gt;Downloads page&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;note-for-rocksdb-backend-users&quot;&gt;Note for RocksDB Backend Users&lt;/h2&gt;
+
+&lt;p&gt;It is highly recommended to use the “fully async” mode for the RocksDB state backend. The “fully async” mode will most likely allow you to easily upgrade to Flink 1.2 (via &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/streaming/savepoints.html&quot;&gt;savepoints&lt;/a&gt;) when it is released. The “semi async” mode will no longer be supported by Flink 1.2.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;RocksDBStateBackend&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;backend&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;k&quot;&gt;new&lt;/span&gt; &lt;span class=&quot;nf&quot;&gt;RocksDBStateBackend&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;s&quot;&gt;&amp;quot;...&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;);&lt;/span&gt;
+&lt;span class=&quot;n&quot;&gt;backend&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;enableFullyAsyncSnapshots&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;();&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;h2 id=&quot;release-notes---flink---version-113&quot;&gt;Release Notes - Flink - Version 1.1.3&lt;/h2&gt;
+
+&lt;h3 id=&quot;bug&quot;&gt;Bug&lt;/h3&gt;
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-2662&quot;&gt;FLINK-2662&lt;/a&gt;] -         CompilerException: &amp;quot;Bug: Plan generation for Unions picked a ship strategy between binary plan operators.&amp;quot;
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4311&quot;&gt;FLINK-4311&lt;/a&gt;] -         TableInputFormat fails when reused on next split
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4329&quot;&gt;FLINK-4329&lt;/a&gt;] -         Fix Streaming File Source Timestamps/Watermarks Handling
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4485&quot;&gt;FLINK-4485&lt;/a&gt;] -         Finished jobs in yarn session fill /tmp filesystem
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4513&quot;&gt;FLINK-4513&lt;/a&gt;] -         Kafka connector documentation refers to Flink 1.1-SNAPSHOT
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4514&quot;&gt;FLINK-4514&lt;/a&gt;] -         ExpiredIteratorException in Kinesis Consumer on long catch-ups to head of stream
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4540&quot;&gt;FLINK-4540&lt;/a&gt;] -         Detached job execution may prevent cluster shutdown
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4544&quot;&gt;FLINK-4544&lt;/a&gt;] -         TaskManager metrics are vulnerable to custom JMX bean installation
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4566&quot;&gt;FLINK-4566&lt;/a&gt;] -         ProducerFailedException does not properly preserve Exception causes
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4588&quot;&gt;FLINK-4588&lt;/a&gt;] -         Fix Merging of Covering Window in MergingWindowSet
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4589&quot;&gt;FLINK-4589&lt;/a&gt;] -         Fix Merging of Covering Window in MergingWindowSet
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4616&quot;&gt;FLINK-4616&lt;/a&gt;] -         Kafka consumer doesn&amp;#39;t store last emitted watermarks per partition in state
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4618&quot;&gt;FLINK-4618&lt;/a&gt;] -         FlinkKafkaConsumer09 should start from the next record on startup from offsets in Kafka
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4619&quot;&gt;FLINK-4619&lt;/a&gt;] -         JobManager does not answer to client when restore from savepoint fails
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4636&quot;&gt;FLINK-4636&lt;/a&gt;] -         AbstractCEPPatternOperator fails to restore state
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4640&quot;&gt;FLINK-4640&lt;/a&gt;] -         Serialization of the initialValue of a Fold on WindowedStream fails
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4651&quot;&gt;FLINK-4651&lt;/a&gt;] -         Re-register processing time timers at the WindowOperator upon recovery.
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4663&quot;&gt;FLINK-4663&lt;/a&gt;] -         Flink JDBCOutputFormat logs wrong WARN message
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4672&quot;&gt;FLINK-4672&lt;/a&gt;] -         TaskManager accidentally decorates Kill messages
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4677&quot;&gt;FLINK-4677&lt;/a&gt;] -         Jars with no job executions produces NullPointerException in ClusterClient
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4702&quot;&gt;FLINK-4702&lt;/a&gt;] -         Kafka consumer must commit offsets asynchronously
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4727&quot;&gt;FLINK-4727&lt;/a&gt;] -         Kafka 0.9 Consumer should also checkpoint auto retrieved offsets even when no data is read
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4732&quot;&gt;FLINK-4732&lt;/a&gt;] -         Maven junction plugin security threat
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4777&quot;&gt;FLINK-4777&lt;/a&gt;] -         ContinuousFileMonitoringFunction may throw IOException when files are moved
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4788&quot;&gt;FLINK-4788&lt;/a&gt;] -         State backend class cannot be loaded, because fully qualified name converted to lower-case
+&lt;/li&gt;
+&lt;/ul&gt;
+
+&lt;h3 id=&quot;improvement&quot;&gt;Improvement&lt;/h3&gt;
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4396&quot;&gt;FLINK-4396&lt;/a&gt;] -         GraphiteReporter class not found at startup of jobmanager
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4574&quot;&gt;FLINK-4574&lt;/a&gt;] -         Strengthen fetch interval implementation in Kinesis consumer
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4723&quot;&gt;FLINK-4723&lt;/a&gt;] -         Unify behaviour of committed offsets to Kafka / ZK for Kafka 0.8 and 0.9 consumer
+&lt;/li&gt;
+&lt;/ul&gt;
+</description>
+<pubDate>Wed, 12 Oct 2016 11:00:00 +0200</pubDate>
+<link>http://flink.apache.org/news/2016/10/12/release-1.1.3.html</link>
+<guid isPermaLink="true">/news/2016/10/12/release-1.1.3.html</guid>
+</item>
+
+<item>
+<title>Apache Flink 1.1.2 Released</title>
+<description>&lt;p&gt;The Apache Flink community released another bugfix version of the Apache Flink 1.1 series.&lt;/p&gt;
+
+&lt;p&gt;We recommend that all users upgrade to Flink 1.1.2.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-xml&quot;&gt;&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-java&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.2&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-streaming-java_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.2&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-clients_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.2&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;You can find the binaries on the updated &lt;a href=&quot;http://flink.apache.org/downloads.html&quot;&gt;Downloads page&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2&gt;Release Notes - Flink - Version 1.1.2&lt;/h2&gt;
+
+&lt;ul&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4236&quot;&gt;FLINK-4236&lt;/a&gt;] -         Flink Dashboard stops showing list of uploaded jars if main method cannot be looked up
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4309&quot;&gt;FLINK-4309&lt;/a&gt;] -         Potential null pointer dereference in DelegatingConfiguration#keySet()
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4334&quot;&gt;FLINK-4334&lt;/a&gt;] -         Shaded Hadoop1 jar not fully excluded in Quickstart
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4341&quot;&gt;FLINK-4341&lt;/a&gt;] -         Kinesis connector does not emit maximum watermark properly
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4402&quot;&gt;FLINK-4402&lt;/a&gt;] -         Wrong metrics parameter names in documentation 
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4409&quot;&gt;FLINK-4409&lt;/a&gt;] -         class conflict between jsr305-1.3.9.jar and flink-shaded-hadoop2-1.1.1.jar
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4411&quot;&gt;FLINK-4411&lt;/a&gt;] -         [py] Chained dual input children are not properly propagated
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4412&quot;&gt;FLINK-4412&lt;/a&gt;] -         [py] Chaining does not properly handle broadcast variables
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4425&quot;&gt;FLINK-4425&lt;/a&gt;] -         &amp;quot;Out Of Memory&amp;quot; during savepoint deserialization
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4454&quot;&gt;FLINK-4454&lt;/a&gt;] -         Lookups for JobManager address in config
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4480&quot;&gt;FLINK-4480&lt;/a&gt;] -         Incorrect link to elastic.co in documentation
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4486&quot;&gt;FLINK-4486&lt;/a&gt;] -         JobManager not fully running when yarn-session.sh finishes
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4488&quot;&gt;FLINK-4488&lt;/a&gt;] -         Prevent cluster shutdown after job execution for non-detached jobs
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4514&quot;&gt;FLINK-4514&lt;/a&gt;] -         ExpiredIteratorException in Kinesis Consumer on long catch-ups to head of stream
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4526&quot;&gt;FLINK-4526&lt;/a&gt;] -         ApplicationClient: remove redundant proxy messages
+&lt;/li&gt;
+
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-3866&quot;&gt;FLINK-3866&lt;/a&gt;] -         StringArraySerializer claims type is immutable; shouldn&amp;#39;t
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-3899&quot;&gt;FLINK-3899&lt;/a&gt;] -         Document window processing with Reduce/FoldFunction + WindowFunction
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4302&quot;&gt;FLINK-4302&lt;/a&gt;] -         Add JavaDocs to MetricConfig
+&lt;/li&gt;
+&lt;li&gt;[&lt;a href=&quot;https://issues.apache.org/jira/browse/FLINK-4495&quot;&gt;FLINK-4495&lt;/a&gt;] -         Running multiple jobs on yarn (without yarn-session)
+&lt;/li&gt;
+&lt;/ul&gt;
+
+</description>
+<pubDate>Mon, 05 Sep 2016 11:00:00 +0200</pubDate>
+<link>http://flink.apache.org/news/2016/09/05/release-1.1.2.html</link>
+<guid isPermaLink="true">/news/2016/09/05/release-1.1.2.html</guid>
+</item>
+
+<item>
+<title>Flink Forward 2016: Announcing Schedule, Keynotes, and Panel Discussion</title>
+<description>&lt;p&gt;An update for the Flink community: the &lt;a href=&quot;http://flink-forward.org/kb_day/day-1/&quot;&gt;Flink Forward 2016 schedule&lt;/a&gt; is now available online. This year&#39;s event will include 2 days of talks from stream processing experts at Google, MapR, Alibaba, Netflix, Cloudera, and more. Following the talks is a full day of hands-on Flink training.&lt;/p&gt;
+
+&lt;p&gt;Ted Dunning has been announced as a keynote speaker at the event. Ted is the VP of Incubator at the &lt;a href=&quot;http://www.apache.org&quot;&gt;Apache Software Foundation&lt;/a&gt;, the Chief Application Architect at &lt;a href=&quot;http://www.mapr.com&quot;&gt;MapR Technologies&lt;/a&gt;, and a mentor on many recent projects. He&#39;ll present &lt;a href=&quot;http://flink-forward.org/kb_sessions/keynote-tba/&quot;&gt;&quot;How Can We Take Flink Forward?&quot;&lt;/a&gt; on the second day of the conference.&lt;/p&gt;
+
+&lt;p&gt;Following Ted&#39;s keynote there will be a panel discussion on &lt;a href=&quot;http://flink-forward.org/kb_sessions/panel-large-scale-streaming-in-production/&quot;&gt;&quot;Large Scale Streaming in Production&quot;&lt;/a&gt;. As stream processing systems become more mainstream, companies are looking to empower their users to take advantage of this technology. We welcome leading stream processing experts Xiaowei Jiang &lt;a href=&quot;http://www.alibaba.com&quot;&gt;(Alibaba)&lt;/a&gt;, Monal Daxini &lt;a href=&quot;http://www.netflix.com&quot;&gt;(Netflix)&lt;/a&gt;, Maxim Fateev &lt;a href=&quot;http://www.uber.com&quot;&gt;(Uber)&lt;/a&gt;, and Ted Dunning &lt;a href=&quot;http://www.mapr.com&quot;&gt;(MapR Technologies)&lt;/a&gt; on stage to talk about the challenges they have faced and the solutions they have discovered while implementing stream processing systems at very large scale. The panel will be moderated by Jamie Grier &lt;a href=&quot;http://www.data-artisans.com&quot;&gt;(data Artisans)&lt;/a&gt;.&lt;/p&gt;
+
+&lt;p&gt;The welcome keynote on Monday, September 12, will be presented by data Artisans&#39; co-founders Kostas Tzoumas and Stephan Ewen. They will talk about &lt;a href=&quot;http://flink-forward.org/kb_sessions/keynote-tba-2/&quot;&gt;&quot;The maturing data streaming ecosystem and Apache Flink’s accelerated growth&quot;&lt;/a&gt;. In this talk, Kostas and Stephan discuss several large-scale stream processing use cases that the data Artisans team has seen over the past year.&lt;/p&gt;
+
+&lt;p&gt;And one more recent addition to the program: Maxim Fateev of Uber will present &lt;a href=&quot;http://flink-forward.org/kb_sessions/beyond-the-watermark-on-demand-backfilling-in-flink/&quot;&gt;&quot;Beyond the Watermark: On-Demand Backfilling in Flink&quot;&lt;/a&gt;. Flink’s time-progress model is built around a single watermark, which is incompatible with Uber’s business need for generating aggregates retroactively. Maxim&#39;s talk covers Uber&#39;s solution for on-demand backfilling.&lt;/p&gt;
+
+&lt;p&gt;We hope to see many community members at Flink Forward 2016. Registration is available online: &lt;a href=&quot;http://flink-forward.org/registration/&quot;&gt;flink-forward.org/registration&lt;/a&gt;
+&lt;/p&gt;
+</description>
+<pubDate>Wed, 24 Aug 2016 11:00:00 +0200</pubDate>
+<link>http://flink.apache.org/news/2016/08/24/ff16-keynotes-panels.html</link>
+<guid isPermaLink="true">/news/2016/08/24/ff16-keynotes-panels.html</guid>
+</item>
+
+<item>
+<title>Flink 1.1.1 Released</title>
+<description>&lt;p&gt;Today, the Flink community released Flink version 1.1.1.&lt;/p&gt;
+
+&lt;p&gt;The Maven artifacts published on Maven central for 1.1.0 had a Hadoop dependency issue: no Hadoop 1-specific version (1.1.0-hadoop1) was deployed, and the 1.1.0 artifacts depend on Hadoop 1 instead of Hadoop 2.&lt;/p&gt;
+
+&lt;p&gt;This was fixed with this release, and we &lt;strong&gt;highly recommend&lt;/strong&gt; that all users switch to this version of Flink by bumping their Flink dependencies to version 1.1.1:&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-xml&quot;&gt;&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-java&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.1&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-streaming-java_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.1&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;dependency&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;groupId&amp;gt;&lt;/span&gt;org.apache.flink&lt;span class=&quot;nt&quot;&gt;&amp;lt;/groupId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;artifactId&amp;gt;&lt;/span&gt;flink-clients_2.10&lt;span class=&quot;nt&quot;&gt;&amp;lt;/artifactId&amp;gt;&lt;/span&gt;
+  &lt;span class=&quot;nt&quot;&gt;&amp;lt;version&amp;gt;&lt;/span&gt;1.1.1&lt;span class=&quot;nt&quot;&gt;&amp;lt;/version&amp;gt;&lt;/span&gt;
+&lt;span class=&quot;nt&quot;&gt;&amp;lt;/dependency&amp;gt;&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;You can find the binaries on the updated &lt;a href=&quot;http://flink.apache.org/downloads.html&quot;&gt;Downloads page&lt;/a&gt;.&lt;/p&gt;
+</description>
+<pubDate>Thu, 11 Aug 2016 11:00:00 +0200</pubDate>
+<link>http://flink.apache.org/news/2016/08/11/release-1.1.1.html</link>
+<guid isPermaLink="true">/news/2016/08/11/release-1.1.1.html</guid>
+</item>
+
+<item>
+<title>Announcing Apache Flink 1.1.0</title>
+<description>&lt;div class=&quot;alert alert-success&quot;&gt;&lt;strong&gt;Important&lt;/strong&gt;: The Maven artifacts published with version 1.1.0 on Maven central have a Hadoop dependency issue. It is highly recommended to use &lt;strong&gt;1.1.1&lt;/strong&gt; or &lt;strong&gt;1.1.1-hadoop1&lt;/strong&gt; as the Flink version.&lt;/div&gt;
+
+&lt;p&gt;The Apache Flink community is pleased to announce the availability of Flink 1.1.0.&lt;/p&gt;
+
+&lt;p&gt;This release is the first major release in the 1.x series and maintains API compatibility with 1.0.0. This means that applications written against the stable APIs of Flink 1.0.0 will compile and run with Flink 1.1.0. In total, 95 contributors provided bug fixes, improvements, and new features, resolving more than 450 JIRA issues. See the &lt;a href=&quot;/blog/release_1.1.0-changelog.html&quot;&gt;complete changelog&lt;/a&gt; for more details.&lt;/p&gt;
+
+&lt;p&gt;&lt;strong&gt;We encourage everyone to &lt;a href=&quot;http://flink.apache.org/downloads.html&quot;&gt;download the release&lt;/a&gt; and &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/&quot;&gt;check out the documentation&lt;/a&gt;. Feedback through the Flink &lt;a href=&quot;http://flink.apache.org/community.html#mailing-lists&quot;&gt;mailing lists&lt;/a&gt; is, as always, very welcome!&lt;/strong&gt;&lt;/p&gt;
+
+&lt;p&gt;Some highlights of the release are listed in the following sections.&lt;/p&gt;
+
+&lt;h2 id=&quot;connectors&quot;&gt;Connectors&lt;/h2&gt;
+
+&lt;p&gt;The &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/streaming/connectors/index.html&quot;&gt;streaming connectors&lt;/a&gt; are a major part of Flink’s DataStream API. This release adds support for new external systems and further improves on the available connectors.&lt;/p&gt;
+
+&lt;h3 id=&quot;continuous-file-system-sources&quot;&gt;Continuous File System Sources&lt;/h3&gt;
+
+&lt;p&gt;A frequently requested feature for Flink 1.0 was the ability to monitor directories and process files continuously. Flink 1.1 now adds support for this via &lt;code&gt;FileProcessingMode&lt;/code&gt;s:&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;DataStream&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;String&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;stream&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;env&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;readFile&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;
+  &lt;span class=&quot;n&quot;&gt;textInputFormat&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt;
+  &lt;span class=&quot;s&quot;&gt;&amp;quot;hdfs:///file-path&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt;
+  &lt;span class=&quot;n&quot;&gt;FileProcessingMode&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;PROCESS_CONTINUOUSLY&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt;
+  &lt;span class=&quot;mi&quot;&gt;5000&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;c1&quot;&gt;// monitoring interval (millis)&lt;/span&gt;
+  &lt;span class=&quot;n&quot;&gt;FilePathFilter&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;createDefaultFilter&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;());&lt;/span&gt; &lt;span class=&quot;c1&quot;&gt;// file path filter&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;This will monitor &lt;code&gt;hdfs:///file-path&lt;/code&gt; every &lt;code&gt;5000&lt;/code&gt; milliseconds. Check out the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/streaming/index.html#data-sources&quot;&gt;DataSource documentation for more details&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h3 id=&quot;kinesis-source-and-sink&quot;&gt;Kinesis Source and Sink&lt;/h3&gt;
+
+&lt;p&gt;Flink 1.1 adds a Kinesis connector for both consuming (&lt;code&gt;FlinkKinesisConsumer&lt;/code&gt;) from and producing (&lt;code&gt;FlinkKinesisProducer&lt;/code&gt;) to &lt;a href=&quot;https://aws.amazon.com/kinesis/&quot;&gt;Amazon Kinesis Streams&lt;/a&gt;, a managed service purpose-built to make it easy to work with streaming data on AWS.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;DataStream&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;String&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;kinesis&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;env&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;addSource&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;
+  &lt;span class=&quot;k&quot;&gt;new&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;FlinkKinesisConsumer&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;lt;&amp;gt;(&lt;/span&gt;&lt;span class=&quot;s&quot;&gt;&amp;quot;stream-name&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;schema&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;config&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;));&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;Check out the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/streaming/connectors/kinesis.html&quot;&gt;Kinesis connector documentation for more details&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h3 id=&quot;cassandra-sink&quot;&gt;Cassandra Sink&lt;/h3&gt;
+
+&lt;p&gt;The &lt;a href=&quot;http://wiki.apache.org/cassandra/GettingStarted&quot;&gt;Apache Cassandra&lt;/a&gt; sink allows you to write from Flink to Cassandra. Flink can provide exactly-once guarantees if the query is idempotent, meaning it can be applied multiple times without changing the result.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;CassandraSink&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;addSink&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;input&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
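+
+&lt;p&gt;As a rough sketch of what a complete sink definition can look like (the keyspace, table, and contact point below are made-up placeholders, and &lt;code&gt;input&lt;/code&gt; is assumed to be a &lt;code&gt;DataStream&lt;/code&gt; of matching tuples), an idempotent &lt;code&gt;INSERT&lt;/code&gt; might be configured along these lines:&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;// sketch: requires the flink-connector-cassandra dependency
+CassandraSink.addSink(input)
+  // idempotent query: re-executing the INSERT for the same key yields the same row
+  .setQuery(&amp;quot;INSERT INTO example.wordcount (word, count) VALUES (?, ?);&amp;quot;)
+  .setClusterBuilder(new ClusterBuilder() {
+    @Override
+    protected Cluster buildCluster(Cluster.Builder builder) {
+      return builder.addContactPoint(&amp;quot;127.0.0.1&amp;quot;).build();
+    }
+  })
+  .build();&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;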
+
+&lt;p&gt;Check out the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/streaming/connectors/cassandra.html&quot;&gt;Cassandra Sink documentation for more details&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;table-api-and-sql&quot;&gt;Table API and SQL&lt;/h2&gt;
+
+&lt;p&gt;The Table API is a SQL-like expression language for relational stream and batch processing that can be easily embedded in Flink’s DataSet and DataStream APIs (for both Java and Scala).&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;Table&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;custT&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;tableEnv&lt;/span&gt;
+  &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;toTable&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;custDs&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;s&quot;&gt;&amp;quot;name, zipcode&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;
+  &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;where&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;s&quot;&gt;&amp;quot;zipcode = &amp;#39;12345&amp;#39;&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;
+  &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;select&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;s&quot;&gt;&amp;quot;name&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;An initial version of this API was already available in Flink 1.0. For Flink 1.1, the community put a lot of work into reworking the architecture of the Table API and integrating it with &lt;a href=&quot;https://calcite.apache.org&quot;&gt;Apache Calcite&lt;/a&gt;.&lt;/p&gt;
+
+&lt;p&gt;In this first version, SQL (and Table API) queries on streams are limited to selection, filter, and union operators. Compared to Flink 1.0, the revised Table API supports many more scalar functions and is able to read tables from external sources and write them back to external sinks.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;Table&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;result&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;tableEnv&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;sql&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;
+  &lt;span class=&quot;s&quot;&gt;&amp;quot;SELECT STREAM product, amount FROM Orders WHERE product LIKE &amp;#39;%Rubber%&amp;#39;&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;);&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+&lt;p&gt;A more detailed introduction can be found in the &lt;a href=&quot;http://flink.apache.org/news/2016/05/24/stream-sql.html&quot;&gt;Flink blog&lt;/a&gt; and the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/table.html&quot;&gt;Table API documentation&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;datastream-api&quot;&gt;DataStream API&lt;/h2&gt;
+
+&lt;p&gt;The DataStream API now exposes &lt;strong&gt;session windows&lt;/strong&gt; and &lt;strong&gt;allowed lateness&lt;/strong&gt; as first-class citizens.&lt;/p&gt;
+
+&lt;h3 id=&quot;session-windows&quot;&gt;Session Windows&lt;/h3&gt;
+
+&lt;p&gt;Session windows are ideal for cases where the window boundaries need to adjust to the incoming data. This enables you to have windows that start at individual points in time for each key and that end once there has been a &lt;em&gt;certain period of inactivity&lt;/em&gt;. The configuration parameter is the session gap that specifies how long to wait for new data before considering a session as closed.&lt;/p&gt;
+
+&lt;center&gt;
+&lt;img src=&quot;/img/blog/session-windows.svg&quot; style=&quot;height:400px&quot; /&gt;
+&lt;/center&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;input&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;keyBy&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;key&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;selector&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;)&lt;/span&gt;
+    &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;window&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;EventTimeSessionWindows&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;withGap&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;Time&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;minutes&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;mi&quot;&gt;10&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)))&lt;/span&gt;
+    &lt;span class=&quot;o&quot;&gt;.&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;windowed&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;transformation&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;(&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;window&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;function&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;);&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;h3 id=&quot;support-for-late-elements&quot;&gt;Support for Late Elements&lt;/h3&gt;
+
+&lt;p&gt;You can now specify how a windowed transformation should deal with late elements and how much lateness is allowed. The parameter for this, called &lt;em&gt;allowed lateness&lt;/em&gt;, specifies by how much time elements can be late.&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;input&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;keyBy&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;key&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;selector&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;).&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;window&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;window&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;assigner&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;)&lt;/span&gt;
+    &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;allowedLateness&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;time&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;)&lt;/span&gt;
+    &lt;span class=&quot;o&quot;&gt;.&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;windowed&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;transformation&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;(&amp;lt;&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;window&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;function&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;&amp;gt;);&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
+
+&lt;p&gt;Elements that arrive within the allowed lateness are still put into windows and are considered when computing window results. If elements arrive after the allowed lateness, they will be dropped. Flink will also make sure that any state held by the windowing operation is garbage collected once the watermark passes the end of a window plus the allowed lateness. For example, with a one-minute allowed lateness, the state of a window ending at 12:00 is kept until the watermark passes 12:01.&lt;/p&gt;
+
+&lt;p&gt;Check out the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/streaming/windows.html&quot;&gt;Windows documentation for more details&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;scala-api-for-complex-event-processing-cep&quot;&gt;Scala API for Complex Event Processing (CEP)&lt;/h2&gt;
+
+&lt;p&gt;Flink 1.0 added the initial version of the CEP library. The core of the library is a Pattern API, which allows you to easily specify patterns to match against in your event stream. While in Flink 1.0 this API was only available for Java, Flink 1.1 now exposes the same API for Scala, allowing you to specify your event patterns in a more concise manner.&lt;/p&gt;
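+
+&lt;p&gt;To give a flavor of the Pattern API, here is a minimal Java sketch (the &lt;code&gt;Event&lt;/code&gt; type, its &lt;code&gt;getTemperature()&lt;/code&gt; accessor, and the threshold are hypothetical and only for illustration):&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;// from flink-cep: match two consecutive high-temperature events within 10 seconds
+Pattern&amp;lt;Event, ?&amp;gt; warningPattern = Pattern.&amp;lt;Event&amp;gt;begin(&amp;quot;first&amp;quot;)
+  .where(evt -&amp;gt; evt.getTemperature() &amp;gt; 100.0)
+  .next(&amp;quot;second&amp;quot;)
+  .where(evt -&amp;gt; evt.getTemperature() &amp;gt; 100.0)
+  .within(Time.seconds(10));
+
+// apply the pattern to a DataStream&amp;lt;Event&amp;gt; called input
+PatternStream&amp;lt;Event&amp;gt; patternStream = CEP.pattern(input, warningPattern);&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;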
+
+&lt;p&gt;A more detailed introduction can be found in the &lt;a href=&quot;http://flink.apache.org/news/2016/04/06/cep-monitoring.html&quot;&gt;Flink blog&lt;/a&gt; and the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/streaming/libs/cep.html&quot;&gt;CEP documentation&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;graph-generators-and-new-gelly-library-algorithms&quot;&gt;Graph generators and new Gelly library algorithms&lt;/h2&gt;
+
+&lt;p&gt;This release includes many enhancements and new features for graph processing. Gelly now provides a collection of scalable graph generators for common graph types, such as complete, cycle, grid, hypercube, and RMat graphs. A variety of new graph algorithms have been added to the Gelly library, including Global and Local Clustering Coefficient, HITS, and similarity measures (Jaccard and Adamic-Adar).&lt;/p&gt;
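+
+&lt;p&gt;As a small sketch of the generator API (class and package names are assumed to follow the Gelly generator package; the vertex count is arbitrary):&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
+
+// generate a complete graph with 5 vertices; every pair of vertices is connected
+Graph&amp;lt;LongValue, NullValue, NullValue&amp;gt; graph =
+  new CompleteGraph(env, 5).generate();&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;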
+
+&lt;p&gt;For a full list of new graph processing features, check out the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/batch/libs/gelly.html&quot;&gt;Gelly documentation&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;metrics&quot;&gt;Metrics&lt;/h2&gt;
+
+&lt;p&gt;Flink’s new metrics system allows you to easily gather and expose metrics from your user application to external systems. You can add counters, gauges, and histograms to your application via the runtime context:&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;&lt;span class=&quot;n&quot;&gt;Counter&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;counter&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;getRuntimeContext&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;()&lt;/span&gt;
+  &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;getMetricGroup&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;()&lt;/span&gt;
+  &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;na&quot;&gt;counter&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;s&quot;&gt;&amp;quot;my-counter&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;);&lt;/span&gt;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;
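+
+&lt;p&gt;Gauges and histograms are registered in the same way. As a sketch, a gauge that reports a value computed by your function could look roughly like this (the metric name and the &lt;code&gt;elementsSeen&lt;/code&gt; field are made up; the code is assumed to live in a rich function such as a &lt;code&gt;RichMapFunction&lt;/code&gt;):&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-java&quot;&gt;// register a gauge that exposes the current value of a field
+getRuntimeContext()
+  .getMetricGroup()
+  .gauge(&amp;quot;elements-seen&amp;quot;, new Gauge&amp;lt;Long&amp;gt;() {
+    @Override
+    public Long getValue() {
+      return elementsSeen; // e.g. a long field incremented in map()
+    }
+  });&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;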
+
+&lt;p&gt;All registered metrics will be exposed via reporters. Out of the box, Flink comes with support for JMX, Ganglia, Graphite, and StatsD. In addition to your custom metrics, Flink exposes many internal metrics like checkpoint sizes and JVM stats.&lt;/p&gt;
+
+&lt;p&gt;Check out the &lt;a href=&quot;https://ci.apache.org/projects/flink/flink-docs-release-1.1/apis/metrics.html&quot;&gt;Metrics documentation for more details&lt;/a&gt;.&lt;/p&gt;
+
+&lt;h2 id=&quot;list-of-contributors&quot;&gt;List of Contributors&lt;/h2&gt;
+
+&lt;p&gt;The following 95 people contributed to this release:&lt;/p&gt;
+
+&lt;ul&gt;
+  &lt;li&gt;Abdullah Ozturk&lt;/li&gt;
+  &lt;li&gt;Ajay Bhat&lt;/li&gt;
+  &lt;li&gt;Alexey Savartsov&lt;/li&gt;
+  &lt;li&gt;Aljoscha Krettek&lt;/li&gt;
+  &lt;li&gt;Andrea Sella&lt;/li&gt;
+  &lt;li&gt;Andrew Palumbo&lt;/li&gt;
+  &lt;li&gt;Chenguang He&lt;/li&gt;
+  &lt;li&gt;Chiwan Park&lt;/li&gt;
+  &lt;li&gt;David Moravek&lt;/li&gt;
+  &lt;li&gt;Dominik Bruhn&lt;/li&gt;
+  &lt;li&gt;Dyana Rose&lt;/li&gt;
+  &lt;li&gt;Fabian Hueske&lt;/li&gt;
+  &lt;li&gt;Flavio Pompermaier&lt;/li&gt;
+  &lt;li&gt;Gabor Gevay&lt;/li&gt;
+  &lt;li&gt;Gabor Horvath&lt;/li&gt;
+  &lt;li&gt;Geoffrey Mon&lt;/li&gt;
+  &lt;li&gt;Gordon Tai&lt;/li&gt;
+  &lt;li&gt;Greg Hogan&lt;/li&gt;
+  &lt;li&gt;Gyula Fora&lt;/li&gt;
+  &lt;li&gt;Henry Saputra&lt;/li&gt;
+  &lt;li&gt;Ignacio N. Lucero Ascencio&lt;/li&gt;
+  &lt;li&gt;Igor Berman&lt;/li&gt;
+  &lt;li&gt;Ismaël Mejía&lt;/li&gt;
+  &lt;li&gt;Ivan Mushketyk&lt;/li&gt;
+  &lt;li&gt;Jark Wu&lt;/li&gt;
+  &lt;li&gt;Jiri Simsa&lt;/li&gt;
+  &lt;li&gt;Jonas Traub&lt;/li&gt;
+  &lt;li&gt;Josh&lt;/li&gt;
+  &lt;li&gt;Joshi&lt;/li&gt;
+  &lt;li&gt;Joshua Herman&lt;/li&gt;
+  &lt;li&gt;Ken Krugler&lt;/li&gt;
+  &lt;li&gt;Konstantin Knauf&lt;/li&gt;
+  &lt;li&gt;Lasse Dalegaard&lt;/li&gt;
+  &lt;li&gt;Li Fanxi&lt;/li&gt;
+  &lt;li&gt;MaBiao&lt;/li&gt;
+  &lt;li&gt;Mao Wei&lt;/li&gt;
+  &lt;li&gt;Mark Reddy&lt;/li&gt;
+  &lt;li&gt;Martin Junghanns&lt;/li&gt;
+  &lt;li&gt;Martin Liesenberg&lt;/li&gt;
+  &lt;li&gt;Maximilian Michels&lt;/li&gt;
+  &lt;li&gt;Michal Fijolek&lt;/li&gt;
+  &lt;li&gt;Márton Balassi&lt;/li&gt;
+  &lt;li&gt;Nathan Howell&lt;/li&gt;
+  &lt;li&gt;Niels Basjes&lt;/li&gt;
+  &lt;li&gt;Niels Zeilemaker&lt;/li&gt;
+  &lt;li&gt;Phetsarath, Sourigna&lt;/li&gt;
+  &lt;li&gt;Robert Metzger&lt;/li&gt;
+  &lt;li&gt;Scott Kidder&lt;/li&gt;
+  &lt;li&gt;Sebastian Klemke&lt;/li&gt;
+  &lt;li&gt;Shahin&lt;/li&gt;
+  &lt;li&gt;Shannon Carey&lt;/li&gt;
+  &lt;li&gt;Shannon Quinn&lt;/li&gt;
+  &lt;li&gt;Stefan Richter&lt;/li&gt;
+  &lt;li&gt;Stefano Baghino&lt;/li&gt;
+  &lt;li&gt;Stefano Bortoli&lt;/li&gt;
+  &lt;li&gt;Stephan Ewen&lt;/li&gt;
+  &lt;li&gt;Steve Cosenza&lt;/li&gt;
+  &lt;li&gt;Sumit Chawla&lt;/li&gt;
+  &lt;li&gt;Tatu Saloranta&lt;/li&gt;
+  &lt;li&gt;Tianji Li&lt;/li&gt;
+  &lt;li&gt;Till Rohrmann&lt;/li&gt;
+  &lt;li&gt;Todd Lisonbee&lt;/li&gt;
+  &lt;li&gt;Tony Baines&lt;/li&gt;
+  &lt;li&gt;Trevor Grant&lt;/li&gt;
+  &lt;li&gt;Ufuk Celebi&lt;/li&gt;
+  &lt;li&gt;Vasudevan&lt;/li&gt;
+  &lt;li&gt;Yijie Shen&lt;/li&gt;
+  &lt;li&gt;Zack Pierce&lt;/li&gt;
+  &lt;li&gt;Zhai Jia&lt;/li&gt;
+  &lt;li&gt;chengxiang li&lt;/li&gt;
+  &lt;li&gt;chobeat&lt;/li&gt;
+  &lt;li&gt;danielblazevski&lt;/li&gt;
+  &lt;li&gt;dawid&lt;/li&gt;
+  &lt;li&gt;dawidwys&lt;/li&gt;
+  &lt;li&gt;eastcirclek&lt;/li&gt;
+  &lt;li&gt;erli ding&lt;/li&gt;
+  &lt;li&gt;gallenvara&lt;/li&gt;
+  &lt;li&gt;kl0u&lt;/li&gt;
+  &lt;li&gt;mans2singh&lt;/li&gt;
+  &lt;li&gt;markreddy&lt;/li&gt;
+  &lt;li&gt;mjsax&lt;/li&gt;
+  &lt;li&gt;nikste&lt;/li&gt;
+  &lt;li&gt;omaralvarez&lt;/li&gt;
+  &lt;li&gt;philippgrulich&lt;/li&gt;
+  &lt;li&gt;ramkrishna&lt;/li&gt;
+  &lt;li&gt;sahitya-pavurala&lt;/li&gt;
+  &lt;li&gt;samaitra&lt;/li&gt;
+  &lt;li&gt;smarthi&lt;/li&gt;
+  &lt;li&gt;spkavuly&lt;/li&gt;
+  &lt;li&gt;subhankar&lt;/li&gt;
+  &lt;li&gt;twalthr&lt;/li&gt;
+  &lt;li&gt;vasia&lt;/li&gt;
+  &lt;li&gt;xueyan.li&lt;/li&gt;
+  &lt;li&gt;zentol&lt;/li&gt;
+  &lt;li&gt;卫乐&lt;/li&gt;
+&lt;/ul&gt;
+</description>
+<pubDate>Mon, 08 Aug 2016 15:00:00 +0200</pubDate>
+<link>http://flink.apache.org/news/2016/08/08/release-1.1.0.html</link>
+<guid isPermaLink="true">/news/2016/08/08/release-1.1.0.html</guid>
+</item>
+
+<item>
+<title>Stream Processing for Everyone with SQL and Apache Flink</title>
+<description>&lt;p&gt;The capabilities of open source systems for distributed stream processing have evolved significantly over the past few years. The first systems in the field (notably &lt;a href=&quot;https://storm.apache.org&quot;&gt;Apache Storm&lt;/a&gt;) provided low-latency processing, but were limited to at-least-once guarantees, processing-time semantics, and rather low-level APIs. Since then, several new systems emerged and pushed the state of the art of open source stream processing in several dimensions. Today, users of Apache Flink or &lt;a href=&quot;https://beam.incubator.apache.org&quot;&gt;Apache Beam&lt;/a&gt; can use fluent Scala and Java APIs to implement stream processing jobs that operate in event-time with exactly-once semantics at high throughput and low latency.&lt;/p&gt;
+
+&lt;p&gt;In the meantime, stream processing has taken off in the industry. We are witnessing rapidly growing interest in stream processing, reflected in widespread deployments of stream processing infrastructure such as &lt;a href=&quot;https://kafka.apache.org&quot;&gt;Apache Kafka&lt;/a&gt; and Apache Flink. The increasing number of available data streams results in a demand for people who can analyze streaming data and turn it into real-time insights. However, stream data analysis requires a special skill set, including knowledge of streaming concepts such as the characteristics of unbounded streams, windows, time, and state, as well as the skills to implement stream analysis jobs, usually against Java or Scala APIs. People with this skill set are rare and hard to find.&lt;/p&gt;
+
+&lt;p&gt;About six months ago, the Apache Flink community started an effort to add a SQL interface for stream data analysis. SQL is &lt;em&gt;the&lt;/em&gt; standard language to access and process data. Everybody who occasionally analyzes data is familiar with SQL. Consequently, a SQL interface for stream data processing will make this technology accessible to a much wider audience. Moreover, SQL support for streaming data will also enable new use cases such as interactive and ad-hoc stream analysis and significantly simplify many applications including stream ingestion and simple transformations. In this blog post, we report on the current status, architectural design, and future plans of the Apache Flink community to implement support for SQL as a language for analyzing data streams.&lt;/p&gt;
+
+&lt;h2 id=&quot;where-did-we-come-from&quot;&gt;Where did we come from?&lt;/h2&gt;
+
+&lt;p&gt;With the &lt;a href=&quot;http://flink.apache.org/news/2015/04/13/release-0.9.0-milestone1.html&quot;&gt;0.9.0-milestone1&lt;/a&gt; release, Apache Flink added an API to process relational data with SQL-like expressions called the Table API. The central concept of this API is a Table, a structured data set or stream to which relational operations can be applied. The Table API is tightly integrated with the DataSet and DataStream API. A Table can be easily created from a DataSet or DataStream and can also be converted back into a DataSet or DataStream, as the following example shows:&lt;/p&gt;
+
+&lt;div class=&quot;highlight&quot;&gt;&lt;pre&gt;&lt;code class=&quot;language-scala&quot;&gt;&lt;span class=&quot;k&quot;&gt;val&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;execEnv&lt;/span&gt; &lt;span class=&quot;k&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;nc&quot;&gt;ExecutionEnvironment&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;getExecutionEnvironment&lt;/span&gt;
+&lt;span class=&quot;k&quot;&gt;val&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;tableEnv&lt;/span&gt; &lt;span class=&quot;k&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;nc&quot;&gt;TableEnvironment&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;getTableEnvironment&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;execEnv&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;
+
+&lt;span class=&quot;c1&quot;&gt;// obtain a DataSet from somewhere&lt;/span&gt;
+&lt;span class=&quot;k&quot;&gt;val&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;tempData&lt;/span&gt;&lt;span class=&quot;k&quot;&gt;:&lt;/span&gt; &lt;span class=&quot;kt&quot;&gt;DataSet&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;[(&lt;/span&gt;&lt;span class=&quot;kt&quot;&gt;String&lt;/span&gt;, &lt;span class=&quot;kt&quot;&gt;Long&lt;/span&gt;, &lt;span class=&quot;kt&quot;&gt;Double&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)]&lt;/span&gt; &lt;span class=&quot;k&quot;&gt;=&lt;/span&gt;
+
+&lt;span class=&quot;c1&quot;&gt;// convert the DataSet to a Table&lt;/span&gt;
+&lt;span class=&quot;k&quot;&gt;val&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;tempTable&lt;/span&gt;&lt;span class=&quot;k&quot;&gt;:&lt;/span&gt; &lt;span class=&quot;kt&quot;&gt;Table&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;tempData&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;toTable&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;tableEnv&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;location&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;time&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;tempF&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;
+&lt;span class=&quot;c1&quot;&gt;// compute your result&lt;/span&gt;
+&lt;span class=&quot;k&quot;&gt;val&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;avgTempCTable&lt;/span&gt;&lt;span class=&quot;k&quot;&gt;:&lt;/span&gt; &lt;span class=&quot;kt&quot;&gt;Table&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;=&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;tempTable&lt;/span&gt;
+ &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;where&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;location&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;like&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;s&quot;&gt;&amp;quot;room%&amp;quot;&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;))&lt;/span&gt;
+ &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;select&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;
+   &lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;time&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;/&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;mi&quot;&gt;3600&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;*&lt;/span&gt; &lt;span class=&quot;mi&quot;&gt;24&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;))&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;as&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;day&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; 
+   &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;location&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;as&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;room&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; 
+   &lt;span class=&quot;o&quot;&gt;((&lt;/span&gt;&lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;tempF&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;-&lt;/span&gt; &lt;span class=&quot;mi&quot;&gt;32&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt; &lt;span class=&quot;o&quot;&gt;*&lt;/span&gt; &lt;span class=&quot;mf&quot;&gt;0.556&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;as&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;tempC&lt;/span&gt;
+  &lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;
+ &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;groupBy&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;day&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;room&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;
+ &lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;select&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;(&lt;/span&gt;&lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;day&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;room&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;,&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;tempC&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;avg&lt;/span&gt; &lt;span class=&quot;n&quot;&gt;as&lt;/span&gt; &lt;span class=&quot;-Symbol&quot;&gt;&amp;#39;avgTempC&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;)&lt;/span&gt;
+&lt;span class=&quot;c1&quot;&gt;// convert result Table back into a DataSet and print it&lt;/span&gt;
+&lt;span class=&quot;n&quot;&gt;avgTempCTable&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;.&lt;/span&gt;&lt;span class=&quot;n&quot;&gt;toDataSet&lt;/span&gt;&lt;span class=&quot;o&quot;&gt;[&lt;/span&gt;&lt;span class=

<TRUNCATED>