Posted to commits@flink.apache.org by se...@apache.org on 2018/01/05 18:38:42 UTC

[01/19] flink git commit: [hotfix] Fix many many typos

Repository: flink
Updated Branches:
  refs/heads/master fb29898cd -> 7034e9cfc


http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java
index dba6a4c..627dc4c 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/ClusterConfigurationInfo.java
@@ -24,7 +24,7 @@ import org.apache.flink.runtime.rest.handler.legacy.ClusterConfigHandler;
 import java.util.ArrayList;
 
 /**
- * Response of the {@link ClusterConfigHandler}, respresented as a list
+ * Response of the {@link ClusterConfigHandler}, represented as a list
  * of key-value pairs of the cluster {@link Configuration}.
  */
 public class ClusterConfigurationInfo extends ArrayList<ClusterConfigurationInfoEntry> implements ResponseBody {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java
index fea537b..5edccd6 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/AbstractKeyedStateBackend.java
@@ -99,7 +99,7 @@ public abstract class AbstractKeyedStateBackend<K>
 	private final ExecutionConfig executionConfig;
 
 	/**
-	 * Decoratores the input and output streams to write key-groups compressed.
+	 * Decorates the input and output streams to write key-groups compressed.
 	 */
 	protected final StreamCompressionDecorator keyGroupCompressionDecorator;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java
index 9edf8fc..aa17efb 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/DefaultOperatorStateBackend.java
@@ -426,7 +426,7 @@ public class DefaultOperatorStateBackend implements OperatorStateBackend {
 		private final ArrayList<S> internalList;
 
 		/**
-		 * A serializer that allows to perfom deep copies of internalList
+		 * A serializer that allows performing deep copies of internalList.
 		 */
 		private final ArrayListSerializer<S> internalListCopySerializer;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java
index 24e3d92..458c695 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/SharedStateRegistry.java
@@ -79,7 +79,7 @@ public class SharedStateRegistry implements AutoCloseable {
 	 *
 	 * @param state the shared state for which we register a reference.
 	 * @return the result of this registration request, consisting of the state handle that is
-	 * registered under the key by the end of the oepration and its current reference count.
+	 * registered under the key by the end of the operation and its current reference count.
 	 */
 	public Result registerReference(SharedStateRegistryKey registrationKey, StreamStateHandle state) {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java b/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java
index 799f639..1384336 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotTable.java
@@ -179,7 +179,7 @@ public class TaskSlotTable implements TimeoutListener<AllocationID> {
 		boolean result = taskSlot.allocate(jobId, allocationId);
 
 		if (result) {
-			// update the alloction id to task slot map
+			// update the allocation id to task slot map
 			allocationIDTaskSlotMap.put(allocationId, taskSlot);
 
 			// register a timeout for this slot since it's in state allocated

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
index a049063..3c1e98e 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
@@ -1550,7 +1550,7 @@ public class Task implements Runnable, TaskActions {
 
 				// It is possible that the user code does not react to the task canceller.
 				// for that reason, we spawn this separate thread that repeatedly interrupts
-				// the user code until it exits. If the suer user code does not exit within
+				// the user code until it exits. If the user code does not exit within
 				// the timeout, we notify the job manager about a fatal error.
 				while (executer.isAlive()) {
 					long now = System.nanoTime();

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala b/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala
index 3df1c0a..cab1378 100644
--- a/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala
+++ b/flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala
@@ -588,7 +588,7 @@ object AkkaUtils {
    * @param tries maximum number of tries before the future fails
    * @param executionContext which shall execute the future
    * @param timeout of the future
-   * @return future which tries to receover by re-executing itself a given number of times
+   * @return future which tries to recover by re-executing itself a given number of times
    */
   def retry(target: ActorRef, message: Any, tries: Int)(implicit executionContext:
   ExecutionContext, timeout: FiniteDuration): Future[Any] = {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala b/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala
index 435b736..c1227dc 100644
--- a/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala
+++ b/flink-runtime/src/main/scala/org/apache/flink/runtime/messages/ArchiveMessages.scala
@@ -39,7 +39,7 @@ object ArchiveMessages {
   case object RequestJobCounts
 
   /**
-   * Reqeuest a specific ExecutionGraph by JobID. The response is [[RequestArchivedJob]]
+   * Request a specific ExecutionGraph by JobID. The response is [[RequestArchivedJob]]
    * @param jobID
    */
   case class RequestArchivedJob(jobID: JobID)

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
index ab00f2b..2572bc1 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
@@ -1493,7 +1493,7 @@ public class CheckpointCoordinatorTest extends TestLogger {
 		assertTrue(pending.isDiscarded());
 		assertTrue(savepointFuture.isDone());
 
-		// the now the saveppoint should be completed but not added to the completed checkpoint store
+		// now the savepoint should be completed but not added to the completed checkpoint store
 		assertEquals(0, coord.getNumberOfRetainedSuccessfulCheckpoints());
 		assertEquals(0, coord.getNumberOfPendingCheckpoints());
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java
index ef31f0a..bf79457 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/PendingCheckpointTest.java
@@ -249,7 +249,7 @@ public class PendingCheckpointTest {
 	@Test
 	public void testPendingCheckpointStatsCallbacks() throws Exception {
 		{
-			// Complete sucessfully
+			// Complete successfully
 			PendingCheckpointStats callback = mock(PendingCheckpointStats.class);
 			PendingCheckpoint pending = createPendingCheckpoint(CheckpointProperties.forStandardCheckpoint(), null);
 			pending.setStatsCallback(callback);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java
index dc2b11e..f493d6f 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/ZooKeeperCompletedCheckpointStoreITCase.java
@@ -270,7 +270,7 @@ public class ZooKeeperCompletedCheckpointStoreITCase extends CompletedCheckpoint
 
 		TestCompletedCheckpoint completedCheckpoint3 = createCheckpoint(3, sharedStateRegistry);
 
-		// this should release the last lock on completedCheckoint and thus discard it
+		// this should release the last lock on completedCheckpoint and thus discard it
 		zkCheckpointStore2.addCheckpoint(completedCheckpoint3);
 
 		// the checkpoint should be discarded eventually because there is no lock on it anymore

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java b/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java
index 4fdaef5..301d206 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/client/JobClientActorRecoveryITCase.java
@@ -67,7 +67,7 @@ public class JobClientActorRecoveryITCase extends TestLogger {
 	}
 
 	/**
-	 * Tests wether the JobClientActor can connect to a newly elected leading job manager to obtain
+	 * Tests whether the JobClientActor can connect to a newly elected leading job manager to obtain
 	 * the JobExecutionResult. The submitted job blocks for the first execution attempt. The
 	 * leading job manager will be killed so that the second job manager will be elected as the
 	 * leader. The newly elected leader has to retrieve the checkpointed job from ZooKeeper

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java
index bfad327..fc769ae 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/GlobalModVersionTest.java
@@ -95,7 +95,7 @@ public class GlobalModVersionTest extends TestLogger {
 	}
 
 	/**
-	 * Tests that failures during a global faiover are not handed to the local
+	 * Tests that failures during a global failover are not handed to the local
 	 * failover strategy.
 	 */
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
index 75627ed..bff7484 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/heartbeat/HeartbeatManagerTest.java
@@ -251,7 +251,7 @@ public class HeartbeatManagerTest extends TestLogger {
 	 */
 	@Test
 	public void testTargetUnmonitoring() throws InterruptedException, ExecutionException {
-		// this might be too aggresive for Travis, let's see...
+		// this might be too aggressive for Travis, let's see...
 		long heartbeatTimeout = 100L;
 		ResourceID resourceID = new ResourceID("foobar");
 		ResourceID targetID = new ResourceID("target");

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java
index ba86131..8c7ca1b 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/ChannelViewsTest.java
@@ -262,7 +262,7 @@ public class ChannelViewsTest
 		final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, true);
 		generator.reset();
 		
-		// read and re-generate all records and cmpare them
+		// read and re-generate all records and compare them
 		final Tuple2<Integer, String> readRec = new Tuple2<>();
 		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
 			generator.next(rec);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java
index ba92bdf..4964be7 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/NetworkEnvironmentTest.java
@@ -112,7 +112,7 @@ public class NetworkEnvironmentTest {
 	 * @param partitionType
 	 * 		the produced partition type
 	 * @param channels
-	 * 		the nummer of output channels
+	 * 		the number of output channels
 	 *
 	 * @return instance with minimal data set and some mocks so that it is useful for {@link
 	 * NetworkEnvironment#registerTask(Task)}
@@ -140,7 +140,7 @@ public class NetworkEnvironmentTest {
 	 * @param partitionType
 	 * 		the consumed partition type
 	 * @param channels
-	 * 		the nummer of input channels
+	 * 		the number of input channels
 	 *
 	 * @return mock with minimal functionality necessary by {@link NetworkEnvironment#registerTask(Task)}
 	 */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
index 41201cf..1076f99 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/TaskEventDispatcherTest.java
@@ -122,7 +122,7 @@ public class TaskEventDispatcherTest extends TestLogger {
 
 		ted.unregisterPartition(partitionId2);
 
-		// publis something for partitionId1 triggering all according listeners
+		// publish something for partitionId1, triggering all corresponding listeners
 		assertTrue(ted.publish(partitionId1, event));
 		assertTrue("listener should have fired for AllWorkersDoneEvent", eventListener1a.fired);
 		assertTrue("listener should have fired for AllWorkersDoneEvent", eventListener2.fired);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java
index 51cc469..5bc207a 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerTest.java
@@ -906,7 +906,7 @@ public class JobManagerTest extends TestLogger {
 			msg = new TestingJobManagerMessages.WaitForAllVerticesToBeRunning(jobGraph.getJobID());
 			Await.result(jobManager.ask(msg, timeout), timeout);
 
-			// Notify when canelled
+			// Notify when cancelled
 			msg = new NotifyWhenJobStatus(jobGraph.getJobID(), JobStatus.CANCELED);
 			Future<Object> cancelled = jobManager.ask(msg, timeout);
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java
index be7407e..47ee1a9 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/metrics/groups/TaskMetricGroupTest.java
@@ -147,7 +147,7 @@ public class TaskMetricGroupTest extends TestLogger {
 
 		taskMetricGroup.close();
 
-		// now alle registered metrics should have been unregistered
+		// now all registered metrics should have been unregistered
 		assertEquals(0, registry.getNumberRegisteredMetrics());
 
 		registry.shutdown();

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java
index 74e16a0..f860a30 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationRegistryTest.java
@@ -44,7 +44,7 @@ import static org.mockito.Mockito.when;
 public class KvStateLocationRegistryTest {
 
 	/**
-	 * Simple test registering/unregistereing state and looking it up again.
+	 * Simple test registering/unregistering state and looking it up again.
 	 */
 	@Test
 	public void testRegisterAndLookup() throws Exception {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
index 3c79948..b46b4e0 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
@@ -38,7 +38,7 @@ import static org.junit.Assert.assertEquals;
 public class KvStateLocationTest {
 
 	/**
-	 * Simple test registering/unregistereing state and looking it up again.
+	 * Simple test registering/unregistering state and looking it up again.
 	 */
 	@Test
 	public void testRegisterAndLookup() throws Exception {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java
index cf0aef9..375eb0b 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerTest.java
@@ -999,7 +999,7 @@ public class SlotManagerTest extends TestLogger {
 				() -> slotManager.isTaskManagerIdle(taskManagerConnection.getInstanceID()),
 				mainThreadExecutor);
 
-			// check that the TaskManaer is not idle
+			// check that the TaskManager is not idle
 			assertFalse(idleFuture.get());
 
 			final SlotID slotId = slotIdArgumentCaptor.getValue();

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java
index 7337772..17d9234 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/checkpoints/CheckpointStatsCacheTest.java
@@ -30,7 +30,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 /**
- * Tests for the CheckpoitnStatsCache.
+ * Tests for the CheckpointStatsCache.
  */
 public class CheckpointStatsCacheTest {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java
index 4104595..6272473 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/state/SharedStateRegistryTest.java
@@ -82,7 +82,7 @@ public class SharedStateRegistryTest {
 	}
 
 	/**
-	 * Validate that unregister an unexisted key will throw exception
+	 * Validate that unregistering a nonexistent key will throw an exception
 	 */
 	@Test(expected = IllegalStateException.class)
 	public void testUnregisterWithUnexistedKey() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java b/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
index 0ac607f..7730aec 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/state/StateBackendTestBase.java
@@ -463,7 +463,7 @@ public abstract class StateBackendTestBase<B extends AbstractStateBackend> exten
 	 *  - snapshot was taken without any Kryo registrations, specific serializers or default serializers for the state type
 	 *  - restored with the state type registered (no specific serializer)
 	 *
-	 * This test should not fail, because de- / serialization of the state should noth be performed with Kryo's default
+	 * This test should not fail, because de- / serialization of the state should not be performed with Kryo's default
 	 * {@link com.esotericsoftware.kryo.serializers.FieldSerializer}.
 	 */
 	@Test

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java
index f47608c..0059866 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TaskManagerServicesTest.java
@@ -174,7 +174,7 @@ public class TaskManagerServicesTest extends TestLogger{
 	/**
 	 * Returns the value or the lower/upper bound in case the value is less/greater than the lower/upper bound, respectively.
 	 *
-	 * @param value value to inspec
+	 * @param value value to inspect
 	 * @param lower lower bound
 	 * @param upper upper bound
 	 *

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java
index a9b676e..94f325d 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/taskexecutor/TestingTaskExecutorGateway.java
@@ -104,7 +104,7 @@ public class TestingTaskExecutorGateway implements TaskExecutorGateway {
 
 	@Override
 	public void disconnectJobManager(JobID jobId, Exception cause) {
-		// nooop
+		// noop
 	}
 
 	@Override

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala b/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala
index f79c124..bb16197 100644
--- a/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala
+++ b/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerMessages.scala
@@ -121,7 +121,7 @@ object TestingJobManagerMessages {
     */
   case object NotifyWhenClientConnects
   /**
-    * Notifes of client connect
+    * Notifies of client connect
     */
   case object ClientConnected
   /**

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala
----------------------------------------------------------------------
diff --git a/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala b/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala
index b4ccfaa..6148450 100644
--- a/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala
+++ b/flink-scala-shell/src/test/scala/org/apache/flink/api/scala/ScalaShellITCase.scala
@@ -325,7 +325,7 @@ object ScalaShellITCase {
 
   @AfterClass
   def afterAll(): Unit = {
-    // The Scala interpreter somehow changes the class loader. Therfore, we have to reset it
+    // The Scala interpreter somehow changes the class loader. Therefore, we have to reset it
     Thread.currentThread().setContextClassLoader(classOf[ScalaShellITCase].getClassLoader)
 
     cluster.foreach(_.close)

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala
----------------------------------------------------------------------
diff --git a/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala b/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala
index 11d5ec7..9f02706 100644
--- a/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala
+++ b/flink-scala/src/main/scala/org/apache/flink/api/scala/codegen/TypeAnalyzer.scala
@@ -204,7 +204,7 @@ private[flink] trait TypeAnalyzer[C <: Context] { this: MacroContextHolder[C]
       }
 
       if (!hasZeroCtor) {
-        // We don't support POJOs without zero-paramter ctor
+        // We don't support POJOs without zero-parameter ctor
         return GenericClassDescriptor(id, tpe)
       }
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala
----------------------------------------------------------------------
diff --git a/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala b/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala
index 4266449..4a7a3ff 100644
--- a/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala
+++ b/flink-scala/src/test/scala/org/apache/flink/api/scala/MaxByOperatorTest.scala
@@ -40,7 +40,7 @@ class MaxByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsDataset1() {
@@ -54,7 +54,7 @@ class MaxByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsDataset2() {
@@ -67,7 +67,7 @@ class MaxByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsDataset3() {
@@ -96,7 +96,7 @@ class MaxByOperatorTest {
   }
 
   /**
-    * This test validates that an InvalidProgrammException is thrown when maxBy
+    * This test validates that an InvalidProgramException is thrown when maxBy
     * is used on a custom data type.
     */
   @Test(expected = classOf[InvalidProgramException])
@@ -110,7 +110,7 @@ class MaxByOperatorTest {
   }
 
   /**
-    * This test validates that an InvalidProgrammException is thrown when maxBy
+    * This test validates that an InvalidProgramException is thrown when maxBy
     * is used on a custom data type.
     */
   @Test(expected = classOf[InvalidProgramException])
@@ -123,7 +123,7 @@ class MaxByOperatorTest {
   }
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsGrouping1() {
@@ -135,7 +135,7 @@ class MaxByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsGrouping2() {
@@ -147,7 +147,7 @@ class MaxByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsGrouping3() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala
----------------------------------------------------------------------
diff --git a/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala b/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala
index 5e659ad..ca3e7f0 100644
--- a/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala
+++ b/flink-scala/src/test/scala/org/apache/flink/api/scala/MinByOperatorTest.scala
@@ -39,7 +39,7 @@ class MinByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsDataset1() {
@@ -53,7 +53,7 @@ class MinByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsDataset2() {
@@ -66,7 +66,7 @@ class MinByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsDataset3() {
@@ -78,7 +78,7 @@ class MinByOperatorTest {
   }
 
   /**
-    * This test validates that an InvalidProgrammException is thrown when minBy
+    * This test validates that an InvalidProgramException is thrown when minBy
     * is used on a custom data type.
     */
   @Test(expected = classOf[InvalidProgramException])
@@ -109,7 +109,7 @@ class MinByOperatorTest {
   }
 
   /**
-    * This test validates that an InvalidProgrammException is thrown when minBy
+    * This test validates that an InvalidProgramException is thrown when minBy
     * is used on a custom data type.
     */
   @Test(expected = classOf[InvalidProgramException])
@@ -123,7 +123,7 @@ class MinByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsGrouping1() {
@@ -136,7 +136,7 @@ class MinByOperatorTest {
 
   /**
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsGrouping2() {
@@ -149,7 +149,7 @@ class MinByOperatorTest {
 
   /**s
     * This test validates that an index which is out of bounds throws an
-    * IndexOutOfBOundsExcpetion.
+    * IndexOutOfBoundsException.
     */
   @Test(expected = classOf[IndexOutOfBoundsException])
   def testOutOfTupleBoundsGrouping3() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java
index f4597b8..eeaf26d 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/CheckpointingMode.java
@@ -28,7 +28,7 @@ import org.apache.flink.annotation.Public;
  * processing are repeated. For stateful operations and functions, the checkpointing mode defines
  * whether the system draws checkpoints such that a recovery behaves as if the operators/functions
  * see each record "exactly once" ({@link #EXACTLY_ONCE}), or whether the checkpoints are drawn
- * in a simpler fashion that typically encounteres some duplicates upon recovery
+ * in a simpler fashion that typically encounters some duplicates upon recovery
  * ({@link #AT_LEAST_ONCE})</p>
  */
 @Public

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
index 4bbb123..c2ebdf4 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/CoGroupedStreams.java
@@ -611,7 +611,7 @@ public class CoGroupedStreams<T1, T2> {
 
 	// ------------------------------------------------------------------------
 	//  Utility functions that implement the CoGroup logic based on the tagged
-	//  untion window reduce
+	//  union window reduce
 	// ------------------------------------------------------------------------
 
 	private static class Input1Tagger<T1, T2> implements MapFunction<T1, TaggedUnion<T1, T2>> {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
index 28bd5c1..8f2531b 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
@@ -424,7 +424,7 @@ public abstract class StreamExecutionEnvironment {
 	 *
 	 * <p>Shorthand for {@code getCheckpointConfig().getCheckpointingMode()}.
 	 *
-	 * @return The checkpoin
+	 * @return The checkpoint mode
 	 */
 	public CheckpointingMode getCheckpointingMode() {
 		return checkpointCfg.getCheckpointingMode();

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java
index 13100db..63563f3 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamConfig.java
@@ -499,7 +499,7 @@ public class StreamConfig implements Serializable {
 
 
 	// ------------------------------------------------------------------------
-	//  Miscellansous
+	//  Miscellaneous
 	// ------------------------------------------------------------------------
 
 	public void setChainStart() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
index 70b9fd4..0a05f09 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
@@ -558,7 +558,7 @@ public class StreamGraphGenerator {
 	/**
 	 * Transforms a {@code TwoInputTransformation}.
 	 *
-	 * <p>This recusively transforms the inputs, creates a new {@code StreamNode} in the graph and
+	 * <p>This recursively transforms the inputs, creates a new {@code StreamNode} in the graph and
 	 * wired the inputs to this new node.
 	 */
 	private <IN1, IN2, OUT> Collection<Integer> transformTwoInputTransform(TwoInputTransformation<IN1, IN2, OUT> transform) {
@@ -617,7 +617,7 @@ public class StreamGraphGenerator {
 	 *
 	 * <p>If the user specifies a group name, this is taken as is. If nothing is specified and
 	 * the input operations all have the same group name then this name is taken. Otherwise the
-	 * default group is choosen.
+	 * default group is chosen.
 	 *
 	 * @param specifiedGroup The group specified by the user.
 	 * @param inputIds The IDs of the input operations.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java
index 1b531aa..b031dcf 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/HeapInternalTimerService.java
@@ -128,7 +128,7 @@ public class HeapInternalTimerService<K, N> implements InternalTimerService<N>,
 	 * <ol>
 	 *     <li>Setting the {@code keySerialized} and {@code namespaceSerializer} for the timers it will contain.</li>
 	 *     <li>Setting the {@code triggerTarget} which contains the action to be performed when a timer fires.</li>
-	 *     <li>Re-registering timers that were retrieved after recoveting from a node failure, if any.</li>
+	 *     <li>Re-registering timers that were retrieved after recovering from a node failure, if any.</li>
 	 * </ol>
 	 * This method can be called multiple times, as long as it is called with the same serializers.
 	 */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java
index 751de76..2b31d51 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/AsyncResult.java
@@ -40,7 +40,7 @@ public interface AsyncResult {
 	/**
 	 * True fi the async result is a collection of output elements; otherwise false.
 	 *
-	 * @return True if the async reuslt is a collection of output elements; otherwise false
+	 * @return True if the async result is a collection of output elements; otherwise false
 	 */
 	boolean isResultCollection();
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java
index e2c3426..687ea7e 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/async/queue/UnorderedStreamElementQueue.java
@@ -219,7 +219,7 @@ public class UnorderedStreamElementQueue implements StreamElementQueue {
 
 	/**
 	 * Callback for onComplete events for the given stream element queue entry. Whenever a queue
-	 * entry is completed, it is checked whether this entry belogns to the first set. If this is the
+	 * entry is completed, it is checked whether this entry belongs to the first set. If this is the
 	 * case, then the element is added to the completed entries queue from where it can be consumed.
 	 * If the first set becomes empty, then the next set is polled from the uncompleted entries
 	 * queue. Completed entries from this new set are then added to the completed entries queue.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java
index 28496fc..0fc5d2c 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/CoFeedbackTransformation.java
@@ -61,7 +61,7 @@ public class CoFeedbackTransformation<F> extends StreamTransformation<F> {
 	/**
 	 * Creates a new {@code CoFeedbackTransformation} from the given input.
 	 *
-	 * @param parallelism The parallelism of the upstream {@code StreamTransformatino} and the
+	 * @param parallelism The parallelism of the upstream {@code StreamTransformation} and the
 	 *                    feedback edges.
 	 * @param feedbackType The type of the feedback edges
 	 * @param waitTime The wait time of the feedback operator. After the time expires

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java
index c936286..b39ce27 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/OneInputTransformation.java
@@ -34,7 +34,7 @@ import java.util.List;
  * {@link org.apache.flink.streaming.api.operators.OneInputStreamOperator} to one input
  * {@link org.apache.flink.streaming.api.transformations.StreamTransformation}.
  *
- * @param <IN> The type of the elements in the nput {@code StreamTransformation}
+ * @param <IN> The type of the elements in the input {@code StreamTransformation}
  * @param <OUT> The type of the elements that result from this {@code OneInputTransformation}
  */
 @Internal

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java
index 10ac2a6..4c148e8 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java
@@ -207,7 +207,7 @@ public class MergingWindowSet<W extends Window> {
 
 			// don't merge the new window itself, it never had any state associated with it
 			// i.e. if we are only merging one pre-existing window into itself
-			// without extending the pre-exising window
+			// without extending the pre-existing window
 			if (!(mergedWindows.contains(mergeResult) && mergedWindows.size() == 1)) {
 				mergeFunction.merge(mergeResult,
 						mergedWindows,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
index cf606bc..aa19168 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
@@ -628,7 +628,7 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
 	/**
 	 * Returns the cleanup time for a window, which is
 	 * {@code window.maxTimestamp + allowedLateness}. In
-	 * case this leads to a value greated than {@link Long#MAX_VALUE}
+	 * case this leads to a value greater than {@link Long#MAX_VALUE}
 	 * then a cleanup time of {@link Long#MAX_VALUE} is
 	 * returned.
 	 *

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java
index 21f6208..3751670 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/util/typeutils/FieldAccessor.java
@@ -47,7 +47,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  *
  * <p>Field expressions that specify nested fields (e.g. "f1.a.foo") result in nested field
  * accessors. These penetrate one layer, and then delegate the rest of the work to an
- * "innerAccesor". (see PojoFieldAccessor, RecursiveTupleFieldAccessor,
+ * "innerAccessor". (see PojoFieldAccessor, RecursiveTupleFieldAccessor,
  * RecursiveProductFieldAccessor)
  */
 @Internal

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java
index 68738ba..b76ade7 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/DataStreamTest.java
@@ -187,7 +187,7 @@ public class DataStreamTest {
 			assertTrue(edge.getPartitioner() instanceof ForwardPartitioner);
 		}
 
-		// verify self union with differnt partitioners
+		// verify self union with different partitioners
 		assertTrue(streamGraph.getStreamNode(selfUnionDifferentPartition.getId()).getInEdges().size() == 2);
 		boolean hasForward = false;
 		boolean hasBroadcast = false;

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java
index 019faca..6ed4bf7 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSetTest.java
@@ -208,7 +208,7 @@ public class MergingWindowSetTest {
 
 		TestingMergeFunction mergeFunction = new TestingMergeFunction();
 
-		// add several non-overlapping initial windoww
+		// add several non-overlapping initial windows
 
 		mergeFunction.reset();
 		assertEquals(new TimeWindow(0, 3), windowSet.addWindow(new TimeWindow(0, 3), mergeFunction));
@@ -333,7 +333,7 @@ public class MergingWindowSetTest {
 
 		TestingMergeFunction mergeFunction = new TestingMergeFunction();
 
-		// add several non-overlapping initial windoww
+		// add several non-overlapping initial windows
 
 		mergeFunction.reset();
 		assertEquals(new TimeWindow(1, 3), windowSet.addWindow(new TimeWindow(1, 3), mergeFunction));

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java
index bd263f6..30b8da9 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorContractTest.java
@@ -1468,7 +1468,7 @@ public abstract class WindowOperatorContractTest extends TestLogger {
 			@Override
 			public TriggerResult answer(InvocationOnMock invocation) throws Exception {
 				Trigger.TriggerContext context = (Trigger.TriggerContext) invocation.getArguments()[3];
-				// don't intefere with cleanup timers
+				// don't interfere with cleanup timers
 				timeAdaptor.registerTimer(context, 0L);
 				context.getPartitionedState(valueStateDescriptor).update("hello");
 				return TriggerResult.CONTINUE;
@@ -1479,7 +1479,7 @@ public abstract class WindowOperatorContractTest extends TestLogger {
 			@Override
 			public TriggerResult answer(InvocationOnMock invocation) throws Exception {
 				Trigger.OnMergeContext context = (Trigger.OnMergeContext) invocation.getArguments()[1];
-				// don't intefere with cleanup timers
+				// don't interfere with cleanup timers
 				timeAdaptor.registerTimer(context, 0L);
 				context.getPartitionedState(valueStateDescriptor).update("hello");
 				return TriggerResult.CONTINUE;

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java
index d7df479..7c5767a 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorMigrationTest.java
@@ -191,7 +191,7 @@ public class WindowOperatorMigrationTest {
 
 		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
 
-		// add an element that merges the two "key1" sessions, they should now have count 6, and therfore fire
+		// add an element that merges the two "key1" sessions, they should now have count 6, and therefore fire
 		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 10), 4500));
 
 		expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-22", 10L, 10000L), 9999L));
@@ -300,7 +300,7 @@ public class WindowOperatorMigrationTest {
 
 		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
 
-		// add an element that merges the two "key1" sessions, they should now have count 6, and therfore fire
+		// add an element that merges the two "key1" sessions, they should now have count 6, and therefore fire
 		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 10), 4500));
 
 		expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-22", 10L, 10000L), 9999L));

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java
index acdf45a..2fa1c3c 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperatorTest.java
@@ -713,7 +713,7 @@ public class WindowOperatorTest extends TestLogger {
 
 		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
 
-		// add an element that merges the two "key1" sessions, they should now have count 6, and therfore fire
+		// add an element that merges the two "key1" sessions, they should now have count 6, and therefore fire
 		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 10), 4500));
 
 		expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-22", 10L, 10000L), 9999L));

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java
index e3e51aa..8bb1028 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTerminationTest.java
@@ -283,7 +283,7 @@ public class StreamTaskTerminationTest extends TestLogger {
 
 		@Override
 		public OperatorStateHandle call() throws Exception {
-			// notify that we have started the asynchronous checkpointint operation
+			// notify that we have started the asynchronous checkpointing operation
 			CHECKPOINTING_LATCH.trigger();
 			// wait until we have reached the StreamTask#cleanup --> This will already cancel this FutureTask
 			CLEANUP_LATCH.await();

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
index 8ce8b03..5059827 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
@@ -743,7 +743,7 @@ public class StreamTaskTest extends TestLogger {
 	}
 
 	/**
-	 * Tests that the StreamTask first closes alls its operators before setting its
+	 * Tests that the StreamTask first closes all of its operators before setting its
 	 * state to not running (isRunning == false)
 	 *
 	 * <p>See FLINK-7430.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala
----------------------------------------------------------------------
diff --git a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala
index 52c53d5..101d358 100644
--- a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala
+++ b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala
@@ -75,7 +75,7 @@ class CoGroupedStreams[T1, T2](input1: DataStream[T1], input2: DataStream[T2]) {
    * A co-group operation that has [[KeySelector]]s defined for the first input.
    *
    * You need to specify a [[KeySelector]] for the second input using [[equalTo()]]
-   * before you can proceeed with specifying a [[WindowAssigner]] using [[EqualTo.window()]].
+   * before you can proceed with specifying a [[WindowAssigner]] using [[EqualTo.window()]].
    *
    * @tparam KEY Type of the key. This must be the same for both inputs
    */
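
Concretely, the where()/equalTo()/window() ordering described here looks as follows in a complete job. This is an illustrative sketch using the equivalent Java co-group API; the stream contents and window size are arbitrary assumptions.

    import org.apache.flink.api.common.functions.CoGroupFunction;
    import org.apache.flink.api.java.functions.KeySelector;
    import org.apache.flink.api.java.tuple.Tuple2;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
    import org.apache.flink.streaming.api.windowing.time.Time;
    import org.apache.flink.util.Collector;

    public class CoGroupFlowExample {

        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            DataStream<Tuple2<String, Integer>> first =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("b", 2));
            DataStream<Tuple2<String, Integer>> second =
                env.fromElements(Tuple2.of("a", 3));

            first.coGroup(second)
                // key of the first input
                .where(new KeySelector<Tuple2<String, Integer>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Integer> value) {
                        return value.f0;
                    }
                })
                // key of the second input; required before window() becomes available
                .equalTo(new KeySelector<Tuple2<String, Integer>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Integer> value) {
                        return value.f0;
                    }
                })
                .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .apply(new CoGroupFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, String>() {
                    @Override
                    public void coGroup(Iterable<Tuple2<String, Integer>> left,
                                        Iterable<Tuple2<String, Integer>> right,
                                        Collector<String> out) {
                        out.collect("co-grouped one key group");
                    }
                })
                .print();

            env.execute("co-group where/equalTo/window example");
        }
    }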

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java
index 51fad6b..0503c93 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointedStreamingProgram.java
@@ -53,7 +53,7 @@ public class CheckpointedStreamingProgram {
 		env.execute("Checkpointed Streaming Program");
 	}
 
-	// with Checkpoining
+	// with Checkpointing
 	private static class SimpleStringGenerator implements SourceFunction<String>, ListCheckpointed<Integer> {
 		public boolean running = true;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java
index 819ad29..da33447 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CustomKvStateProgram.java
@@ -37,7 +37,7 @@ import java.util.concurrent.ThreadLocalRandom;
 /**
  * A streaming program with a custom reducing KvState.
  *
- * <p>This is used to test proper usage of the user code class laoder when
+ * <p>This is used to test proper usage of the user code class loader when
  * disposing savepoints.
  */
 public class CustomKvStateProgram {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java b/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java
index 38adb62..aa96909 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/operators/JoinITCase.java
@@ -460,7 +460,7 @@ public class JoinITCase extends MultipleProgramsTestBase {
 	@Test
 	public void testDefaultJoinOnTwoCustomTypeInputsWithInnerClassKeyExtractorsDisabledClosureCleaner() throws Exception {
 		/*
-		 * (Default) Join on two custom type inputs with key extractors, check if disableing closure cleaning works
+		 * (Default) Join on two custom type inputs with key extractors, check if disabling closure cleaning works
 		 */
 
 		final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java b/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java
index 1fbbdb2..75a885f 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/runtime/leaderelection/ZooKeeperLeaderElectionITCase.java
@@ -156,7 +156,7 @@ public class ZooKeeperLeaderElectionITCase extends TestLogger {
 		configuration.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlotsPerTM);
 
 		// we "effectively" disable the automatic RecoverAllJobs message and sent it manually to make
-		// sure that all TMs have registered to the JM prior to issueing the RecoverAllJobs message
+		// sure that all TMs have registered to the JM prior to issuing the RecoverAllJobs message
 		configuration.setString(AkkaOptions.ASK_TIMEOUT, AkkaUtils.INF_TIMEOUT().toString());
 
 		Tasks.BlockingOnceReceiver$.MODULE$.blocking_$eq(true);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java b/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java
index 0deda4b..5e08e8a 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java
@@ -301,7 +301,7 @@ public class TimestampITCase extends TestLogger {
 
 	/**
 	 * This tests whether timestamps are properly extracted in the timestamp
-	 * extractor and whether watermarks are also correctly forwared from this with the auto watermark
+	 * extractor and whether watermarks are also correctly forwarded from this with the auto watermark
 	 * interval.
 	 */
 	@Test
@@ -363,7 +363,7 @@ public class TimestampITCase extends TestLogger {
 	}
 
 	/**
-	 * This thests whether timestamps are properly extracted in the timestamp
+	 * This tests whether timestamps are properly extracted in the timestamp
 	 * extractor and whether watermarks are correctly forwarded from the custom watermark emit
 	 * function.
 	 */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java b/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java
index aadeaeb..6d5df02 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/util/CoordVector.java
@@ -27,7 +27,7 @@ import java.io.IOException;
 /**
  * Implements a feature vector as a multi-dimensional point. Coordinates of that point
  * (= the features) are stored as double values. The distance between two feature vectors is
- * the Euclidian distance between the points.
+ * the Euclidean distance between the points.
  */
 public final class CoordVector implements Value, Comparable<CoordVector> {
 	private static final long serialVersionUID = 1L;
@@ -82,14 +82,14 @@ public final class CoordVector implements Value, Comparable<CoordVector> {
 	}
 
 	/**
-	 * Computes the Euclidian distance between this coordinate vector and a
+	 * Computes the Euclidean distance between this coordinate vector and a
 	 * second coordinate vector.
 	 *
 	 * @param cv The coordinate vector to which the distance is computed.
-	 * @return The Euclidian distance to coordinate vector cv. If cv has a
+	 * @return The Euclidean distance to coordinate vector cv. If cv has a
 	 *         different length than this coordinate vector, -1 is returned.
 	 */
-	public double computeEuclidianDistance(CoordVector cv) {
+	public double computeEuclideanDistance(CoordVector cv) {
 		// check coordinate vector lengths
 		if (cv.coordinates.length != this.coordinates.length) {
 			return -1.0;
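
For reference, the formula behind computeEuclideanDistance is sqrt(sum_i (x[i] - y[i])^2). The following is a standalone sketch of that computation (not the class itself), mirroring the -1 convention for mismatched lengths:

    public class EuclideanDistanceExample {

        // Euclidean distance: square root of the sum of squared coordinate differences.
        static double euclideanDistance(double[] x, double[] y) {
            if (x.length != y.length) {
                return -1.0; // mirrors CoordVector's convention for mismatched lengths
            }
            double sumOfSquares = 0.0;
            for (int i = 0; i < x.length; i++) {
                double diff = x[i] - y[i];
                sumOfSquares += diff * diff;
            }
            return Math.sqrt(sumOfSquares);
        }

        public static void main(String[] args) {
            // distance between (0,0) and (3,4) is 5.0
            System.out.println(euclideanDistance(new double[] {0, 0}, new double[] {3, 4}));
        }
    }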

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java b/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java
index 56a95ce..093a44e 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/windowing/sessionwindows/ParallelSessionsEventGenerator.java
@@ -95,7 +95,7 @@ public class ParallelSessionsEventGenerator<K, E> {
 			final int index = i % subGeneratorLists.size();
 			EventGenerator<K, E> subGenerator = subGeneratorLists.get(index);
 
-			// check if the sub-generator can produce an event under the current gloabl watermark
+			// check if the sub-generator can produce an event under the current global watermark
 			if (subGenerator.canGenerateEventAtWatermark(globalWatermark)) {
 
 				E event = subGenerator.generateEvent(globalWatermark);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala b/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala
index 76e8547..6dc2ac5 100644
--- a/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala
+++ b/flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/BatchScalaAPICompletenessTest.scala
@@ -78,7 +78,7 @@ class BatchScalaAPICompletenessTest extends ScalaAPICompletenessTestBase {
        """^org\.apache\.flink\.api.java.*project""",
 
        // I don't want to have withParameters in the API since I consider Configuration to be
-       // deprecated. But maybe thats just me ...
+       // deprecated. But maybe that's just me ...
        """^org\.apache\.flink\.api.java.*withParameters""",
 
        // These are only used internally. Should be internal API but Java doesn't have

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala
----------------------------------------------------------------------
diff --git a/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala b/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala
index 8f7b8bb..3181a45 100644
--- a/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala
+++ b/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupingTest.scala
@@ -71,7 +71,7 @@ class GroupingTest {
     val env = ExecutionEnvironment.getExecutionEnvironment
     val tupleDs = env.fromCollection(emptyTupleData)
 
-    // should not work, fiels position out of range
+    // should not work, field position out of range
     tupleDs.groupBy(5)
   }
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala
----------------------------------------------------------------------
diff --git a/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala b/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala
index 228eaaa..f54528f 100644
--- a/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala
+++ b/flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnTaskManager.scala
@@ -36,7 +36,7 @@ import org.apache.flink.runtime.testingUtils.TestingTaskManagerLike
   * @param config Configuration object for the actor
   * @param resourceID The Yarn container id
   * @param connectionInfo Connection information of this actor
-  * @param memoryManager MemoryManager which is responsibel for Flink's managed memory allocation
+  * @param memoryManager MemoryManager which is responsible for Flink's managed memory allocation
   * @param ioManager IOManager responsible for I/O
   * @param network NetworkEnvironment for this actor
   * @param numberOfSlots Number of slots for this TaskManager

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java
----------------------------------------------------------------------
diff --git a/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java b/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java
index 19d1af5..c11c413 100644
--- a/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java
+++ b/flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java
@@ -291,7 +291,7 @@ public class YarnClusterDescriptorTest extends TestLogger {
 				.getCommands().get(0));
 
 		// logback + log4j, with/out krb5, different JVM opts
-		// IMPORTANT: Beaware that we are using side effects here to modify the created YarnClusterDescriptor
+		// IMPORTANT: Be aware that we are using side effects here to modify the created YarnClusterDescriptor
 		cfg.setString(CoreOptions.FLINK_JM_JVM_OPTIONS, jmJvmOpts);
 		assertEquals(
 			java + " " + jvmmem +
@@ -322,7 +322,7 @@ public class YarnClusterDescriptorTest extends TestLogger {
 				.getCommands().get(0));
 
 		// now try some configurations with different yarn.container-start-command-template
-		// IMPORTANT: Beaware that we are using side effects here to modify the created YarnClusterDescriptor
+		// IMPORTANT: Be aware that we are using side effects here to modify the created YarnClusterDescriptor
 		cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
 			"%java% 1 %jvmmem% 2 %jvmopts% 3 %logging% 4 %class% 5 %args% 6 %redirects%");
 		assertEquals(
@@ -341,7 +341,7 @@ public class YarnClusterDescriptorTest extends TestLogger {
 
 		cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
 			"%java% %logging% %jvmopts% %jvmmem% %class% %args% %redirects%");
-		// IMPORTANT: Beaware that we are using side effects here to modify the created YarnClusterDescriptor
+		// IMPORTANT: Be aware that we are using side effects here to modify the created YarnClusterDescriptor
 		assertEquals(
 			java +
 				" " + logfile + " " + logback + " " + log4j +

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/tools/create_release_files.sh
----------------------------------------------------------------------
diff --git a/tools/create_release_files.sh b/tools/create_release_files.sh
index 5d134e8..3e7a6e7 100755
--- a/tools/create_release_files.sh
+++ b/tools/create_release_files.sh
@@ -266,7 +266,7 @@ prepare
 
 make_source_release
 
-# build dist by input parameter of "--scala-vervion xxx --hadoop-version xxx"
+# build dist by input parameter of "--scala-version xxx --hadoop-version xxx"
 if [ "$SCALA_VERSION" == "none" ] && [ "$HADOOP_VERSION" == "none" ]; then
   make_binary_release "hadoop2" "" "2.11"
   make_binary_release "hadoop26" "-Dhadoop.version=2.6.5" "2.11"

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/tools/list_deps.py
----------------------------------------------------------------------
diff --git a/tools/list_deps.py b/tools/list_deps.py
index aba92d5..125ce14 100755
--- a/tools/list_deps.py
+++ b/tools/list_deps.py
@@ -23,7 +23,7 @@ import sys
 
 # This lists all dependencies in the Maven Project root given as first
 # argument. If a dependency is included in several versions it is listed once
-# for every version. The resul output is sorted. So this can be used
+# for every version. The result output is sorted. So this can be used
 # to get a diff between the Maven dependencies of two versions of a project.
 
 path = sys.argv[1]

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/tools/merge_flink_pr.py
----------------------------------------------------------------------
diff --git a/tools/merge_flink_pr.py b/tools/merge_flink_pr.py
index 76a7694..1799a55 100755
--- a/tools/merge_flink_pr.py
+++ b/tools/merge_flink_pr.py
@@ -46,7 +46,7 @@ except ImportError:
 
 # Location of your FLINK git development area
 FLINK_HOME = os.environ.get("FLINK_HOME", "/home/patrick/Documents/spark")
-# Remote name which points to the Gihub site
+# Remote name which points to the Github site
 PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
 # Remote name which points to Apache git
 PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")


[17/19] flink git commit: [hotfix] [core] Pre-compile regex pattern in Path class

Posted by se...@apache.org.
[hotfix] [core] Pre-compile regex pattern in Path class


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/a49f0378
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/a49f0378
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/a49f0378

Branch: refs/heads/master
Commit: a49f0378c3ad7c9b02ea3a94e44e73e4dcbeafa3
Parents: 1d38e0b
Author: Stephan Ewen <se...@apache.org>
Authored: Wed Dec 13 15:07:52 2017 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:07 2018 +0100

----------------------------------------------------------------------
 .../src/main/java/org/apache/flink/core/fs/Path.java      | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/a49f0378/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
index 6398aa8..b463fd9 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
@@ -32,6 +32,7 @@ import java.io.IOException;
 import java.io.Serializable;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.regex.Pattern;
 
 /**
  * Names a file or directory in a {@link FileSystem}. Path strings use slash as
@@ -59,6 +60,9 @@ public class Path implements IOReadableWritable, Serializable {
 	 */
 	public static final String CUR_DIR = ".";
 
+	/** A pre-compiled regex/state-machine to match the Windows drive pattern. */
+	private static final Pattern WINDOWS_ROOT_DIR_REGEX = Pattern.compile("/\\p{Alpha}+:/");
+
 	/**
 	 * The internal representation of the path, a hierarchical URI.
 	 */
@@ -262,9 +266,9 @@ public class Path implements IOReadableWritable, Serializable {
 		path = path.replaceAll("/+", "/");
 
 		// remove trailing separator
-		if (!path.equals(SEPARATOR) &&              // UNIX root path
-				!path.matches("/\\p{Alpha}+:/") &&  // Windows root path
-				path.endsWith(SEPARATOR)) {
+		if (path.endsWith(SEPARATOR) &&
+				!path.equals(SEPARATOR) &&              // UNIX root path
+				!WINDOWS_ROOT_DIR_REGEX.matcher(path).matches()) {  // Windows root path
 
 			// remove trailing slash
 			path = path.substring(0, path.length() - SEPARATOR.length());
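
The point of the change above, as a standalone illustration (illustrative only, not Flink code): String.matches() compiles its regex on every call, while a static pre-compiled Pattern is built once at class load time and reused.

    import java.util.regex.Pattern;

    public class PrecompiledPatternExample {

        // compiled once, when the class is loaded
        private static final Pattern WINDOWS_ROOT = Pattern.compile("/\\p{Alpha}+:/");

        static boolean isWindowsRootSlow(String path) {
            // compiles the regex again on every invocation
            return path.matches("/\\p{Alpha}+:/");
        }

        static boolean isWindowsRootFast(String path) {
            // reuses the pre-compiled pattern
            return WINDOWS_ROOT.matcher(path).matches();
        }

        public static void main(String[] args) {
            System.out.println(isWindowsRootFast("/C:/"));  // true
            System.out.println(isWindowsRootFast("/tmp/")); // false
        }
    }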


[12/19] flink git commit: [FLINK-8373] [core, hdfs] Ensure consistent semantics of FileSystem.mkdirs() across file system implementations.

Posted by se...@apache.org.
[FLINK-8373] [core, hdfs] Ensure consistent semantics of FileSystem.mkdirs() across file system implementations.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/3d0ed12e
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/3d0ed12e
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/3d0ed12e

Branch: refs/heads/master
Commit: 3d0ed12edab5e1b89db0829230e69fb6ef841b7e
Parents: fd13ed0
Author: Stephan Ewen <se...@apache.org>
Authored: Thu Dec 7 16:11:24 2017 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 .../flink/core/fs/local/LocalFileSystem.java    |  29 ++-
 .../core/fs/FileSystemBehaviorTestSuite.java    | 210 +++++++++++++++++++
 .../java/org/apache/flink/core/fs/PathTest.java |  21 +-
 .../fs/local/LocalFileSystemBehaviorTest.java   |  51 +++++
 flink-filesystems/flink-hadoop-fs/pom.xml       |  26 +++
 .../hdfs/HadoopLocalFileSystemBehaviorTest.java |  74 +++++++
 .../flink/runtime/fs/hdfs/HdfsBehaviorTest.java |  98 +++++++++
 .../flink/runtime/fs/hdfs/HdfsKindTest.java     | 101 +++++++++
 flink-filesystems/flink-s3-fs-hadoop/pom.xml    |   9 +
 .../HadoopS3FileSystemBehaviorITCase.java       |  79 +++++++
 flink-filesystems/flink-s3-fs-presto/pom.xml    |   9 +
 .../PrestoS3FileSystemBehaviorITCase.java       |  79 +++++++
 .../flink/runtime/fs/hdfs/HdfsKindTest.java     | 101 ---------
 13 files changed, 765 insertions(+), 122 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
index c3e5a2f..d16108b 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
@@ -70,11 +70,11 @@ public class LocalFileSystem extends FileSystem {
 
 	/** Path pointing to the current working directory.
 	 * Because Paths are not immutable, we cannot cache the proper path here. */
-	private final String workingDir;
+	private final URI workingDir;
 
 	/** Path pointing to the user's home directory.
 	 * Because Paths are not immutable, we cannot cache the proper path here. */
-	private final String homeDir;
+	private final URI homeDir;
 
 	/** The host name of this machine. */
 	private final String hostName;
@@ -83,8 +83,8 @@ public class LocalFileSystem extends FileSystem {
 	 * Constructs a new <code>LocalFileSystem</code> object.
 	 */
 	public LocalFileSystem() {
-		this.workingDir = new Path(System.getProperty("user.dir")).makeQualified(this).toString();
-		this.homeDir = new Path(System.getProperty("user.home")).toString();
+		this.workingDir = new File(System.getProperty("user.dir")).toURI();
+		this.homeDir = new File(System.getProperty("user.home")).toURI();
 
 		String tmp = "unknownHost";
 		try {
@@ -229,14 +229,25 @@ public class LocalFileSystem extends FileSystem {
 	 */
 	@Override
 	public boolean mkdirs(final Path f) throws IOException {
-		final File p2f = pathToFile(f);
+		checkNotNull(f, "path is null");
+		return mkdirsInternal(pathToFile(f));
+	}
 
-		if (p2f.isDirectory()) {
-			return true;
+	private boolean mkdirsInternal(File file) throws IOException {
+		if (file.isDirectory()) {
+				return true;
 		}
+		else if (file.exists() && !file.isDirectory()) {
+			// Important: The 'exists()' check above must come before the 'isDirectory()' check to
+			//            be safe when multiple parallel instances try to create the directory
 
-		final Path parent = f.getParent();
-		return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
+			// exists and is not a directory -> is a regular file
+			throw new FileAlreadyExistsException(file.getAbsolutePath());
+		}
+		else {
+			File parent = file.getParentFile();
+			return (parent == null || mkdirsInternal(parent)) && file.mkdir();
+		}
 	}
 
 	@Override
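
For reference, the mkdirs() contract that this change establishes, and that the new FileSystemBehaviorTestSuite below verifies, can be exercised roughly as follows. This is a minimal sketch against Flink's FileSystem API; the scratch path and the example file are assumptions.

    import org.apache.flink.core.fs.FileSystem;
    import org.apache.flink.core.fs.Path;

    import java.io.IOException;

    public class MkdirsContractExample {

        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.getLocalFileSystem();

            // assumed scratch location that does not exist yet
            Path dir = new Path("/tmp/flink-mkdirs-example/a/b/c");

            // creating a new directory (including missing parents) returns true,
            // and calling mkdirs() again on the existing directory returns true too
            System.out.println(fs.mkdirs(dir)); // true
            System.out.println(fs.mkdirs(dir)); // true

            // if a regular file already occupies the path, mkdirs() must fail
            // with an IOException rather than silently returning false
            try {
                fs.mkdirs(new Path("/etc/hosts")); // an existing file on most Unix systems
            } catch (IOException e) {
                System.out.println("failed as required: " + e.getMessage());
            }
        }
    }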

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-core/src/test/java/org/apache/flink/core/fs/FileSystemBehaviorTestSuite.java
----------------------------------------------------------------------
diff --git a/flink-core/src/test/java/org/apache/flink/core/fs/FileSystemBehaviorTestSuite.java b/flink-core/src/test/java/org/apache/flink/core/fs/FileSystemBehaviorTestSuite.java
new file mode 100644
index 0000000..a06aff6
--- /dev/null
+++ b/flink-core/src/test/java/org/apache/flink/core/fs/FileSystemBehaviorTestSuite.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.core.fs;
+
+import org.apache.flink.core.fs.FileSystem.WriteMode;
+import org.apache.flink.util.StringUtils;
+
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Common tests for the behavior of {@link FileSystem} methods.
+ */
+public abstract class FileSystemBehaviorTestSuite {
+
+	private static final Random RND = new Random();
+
+	/** The cached file system instance. */
+	private FileSystem fs;
+
+	/** The cached base path. */
+	private Path basePath;
+
+	// ------------------------------------------------------------------------
+	//  FileSystem-specific methods
+	// ------------------------------------------------------------------------
+
+	/**
+	 * Gets an instance of the {@code FileSystem} to be tested.
+	 */
+	public abstract FileSystem getFileSystem() throws Exception;
+
+	/**
+	 * Gets the base path in the file system under which tests will place their temporary files.
+	 */
+	public abstract Path getBasePath() throws Exception;
+
+	/**
+	 * Gets the kind of the file system (file system, object store, ...).
+	 */
+	public abstract FileSystemKind getFileSystemKind();
+
+	// ------------------------------------------------------------------------
+	//  Init / Cleanup
+	// ------------------------------------------------------------------------
+
+	@Before
+	public void prepare() throws Exception {
+		fs = getFileSystem();
+		basePath = new Path(getBasePath(), randomName());
+		fs.mkdirs(basePath);
+	}
+
+	@After
+	public void cleanup() throws Exception {
+		fs.delete(basePath, true);
+	}
+
+	// ------------------------------------------------------------------------
+	//  Suite of Tests
+	// ------------------------------------------------------------------------
+
+	// --- file system kind
+
+	@Test
+	public void testFileSystemKind() {
+		assertEquals(getFileSystemKind(), fs.getKind());
+	}
+
+	// --- access and scheme
+
+	@Test
+	public void testPathAndScheme() throws Exception {
+		assertEquals(fs.getUri(), getBasePath().getFileSystem().getUri());
+		assertEquals(fs.getUri().getScheme(), getBasePath().toUri().getScheme());
+	}
+
+	@Test
+	public void testHomeAndWorkDir() {
+		assertEquals(fs.getUri().getScheme(), fs.getWorkingDirectory().toUri().getScheme());
+		assertEquals(fs.getUri().getScheme(), fs.getHomeDirectory().toUri().getScheme());
+	}
+
+	// --- mkdirs
+
+	@Test
+	public void testMkdirsReturnsTrueWhenCreatingDirectory() throws Exception {
+		// this test applies to object stores as well, as we rely on the fact that
+		// they return true when things are not bad
+
+		final Path directory = new Path(basePath, randomName());
+		assertTrue(fs.mkdirs(directory));
+
+		if (getFileSystemKind() != FileSystemKind.OBJECT_STORE) {
+			assertTrue(fs.exists(directory));
+		}
+	}
+
+	@Test
+	public void testMkdirsCreatesParentDirectories() throws Exception {
+		// this test applies to object stores as well, as we rely on the fact that
+		// they return true when things are not bad
+
+		final Path directory = new Path(new Path(new Path(basePath, randomName()), randomName()), randomName());
+		assertTrue(fs.mkdirs(directory));
+
+		if (getFileSystemKind() != FileSystemKind.OBJECT_STORE) {
+			assertTrue(fs.exists(directory));
+		}
+	}
+
+	@Test
+	public void testMkdirsReturnsTrueForExistingDirectory() throws Exception {
+		// this test applies to object stores as well, as we rely on the fact that
+		// they return true when things are not bad
+
+		final Path directory = new Path(basePath, randomName());
+
+		// make sure the directory exists
+		createRandomFileInDirectory(directory);
+
+		assertTrue(fs.mkdirs(directory));
+	}
+
+	@Test
+	public void testMkdirsFailsForExistingFile() throws Exception {
+		// test is not defined for object stores, they have no proper notion
+		// of directories
+		assumeNotObjectStore();
+
+		final Path file = new Path(getBasePath(), randomName());
+		createFile(file);
+
+		try {
+			fs.mkdirs(file);
+			fail("should fail with an IOException");
+		}
+		catch (IOException e) {
+			// good!
+		}
+	}
+
+	@Test
+	public void testMkdirsFailsWithExistingParentFile() throws Exception {
+		// test is not defined for object stores, they have no proper notion
+		// of directories
+		assumeNotObjectStore();
+
+		final Path file = new Path(getBasePath(), randomName());
+		createFile(file);
+
+		final Path dirUnderFile = new Path(file, randomName());
+		try {
+			fs.mkdirs(dirUnderFile);
+			fail("should fail with an IOException");
+		}
+		catch (IOException e) {
+			// good!
+		}
+	}
+
+	// ------------------------------------------------------------------------
+	//  Utilities
+	// ------------------------------------------------------------------------
+
+	private static String randomName() {
+		return StringUtils.getRandomString(RND, 16, 16, 'a', 'z');
+	}
+
+	private void createFile(Path file) throws IOException {
+		try (FSDataOutputStream out = fs.create(file, WriteMode.NO_OVERWRITE)) {
+			out.write(new byte[] {1, 2, 3, 4, 5, 6, 7, 8});
+		}
+	}
+
+	private void createRandomFileInDirectory(Path directory) throws IOException {
+		fs.mkdirs(directory);
+		createFile(new Path(directory, randomName()));
+	}
+
+	private void assumeNotObjectStore() {
+		Assume.assumeTrue("Test does not apply to object stores",
+				getFileSystemKind() != FileSystemKind.OBJECT_STORE);
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-core/src/test/java/org/apache/flink/core/fs/PathTest.java
----------------------------------------------------------------------
diff --git a/flink-core/src/test/java/org/apache/flink/core/fs/PathTest.java b/flink-core/src/test/java/org/apache/flink/core/fs/PathTest.java
index b4da2dc..6d53adb 100644
--- a/flink-core/src/test/java/org/apache/flink/core/fs/PathTest.java
+++ b/flink-core/src/test/java/org/apache/flink/core/fs/PathTest.java
@@ -287,23 +287,20 @@ public class PathTest {
 
 	@Test
 	public void testMakeQualified() throws IOException {
-		String path;
-		Path p;
-		URI u;
+		// make relative path qualified
+		String path = "test/test";
+		Path p  = new Path(path).makeQualified(FileSystem.getLocalFileSystem());
+		URI u = p.toUri();
 
-		path = "test/test";
-		p = new Path(path);
-		u = p.toUri();
-		p = p.makeQualified(FileSystem.get(u));
-		u = p.toUri();
 		assertEquals("file", u.getScheme());
 		assertEquals(null, u.getAuthority());
-		assertEquals(FileSystem.getLocalFileSystem().getWorkingDirectory().toUri().getPath() + "/" + path, u.getPath());
 
+		String q = new Path(FileSystem.getLocalFileSystem().getWorkingDirectory().getPath(), path).getPath();
+		assertEquals(q, u.getPath());
+
+		// make absolute path qualified
 		path = "/test/test";
-		p = new Path(path);
-		u = p.toUri();
-		p = p.makeQualified(FileSystem.get(u));
+		p = new Path(path).makeQualified(FileSystem.getLocalFileSystem());
 		u = p.toUri();
 		assertEquals("file", u.getScheme());
 		assertEquals(null, u.getAuthority());

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-core/src/test/java/org/apache/flink/core/fs/local/LocalFileSystemBehaviorTest.java
----------------------------------------------------------------------
diff --git a/flink-core/src/test/java/org/apache/flink/core/fs/local/LocalFileSystemBehaviorTest.java b/flink-core/src/test/java/org/apache/flink/core/fs/local/LocalFileSystemBehaviorTest.java
new file mode 100644
index 0000000..2a8522a
--- /dev/null
+++ b/flink-core/src/test/java/org/apache/flink/core/fs/local/LocalFileSystemBehaviorTest.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.core.fs.local;
+
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.FileSystemBehaviorTestSuite;
+import org.apache.flink.core.fs.FileSystemKind;
+import org.apache.flink.core.fs.Path;
+
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Behavior tests for Flink's {@link LocalFileSystem}.
+ */
+public class LocalFileSystemBehaviorTest extends FileSystemBehaviorTestSuite {
+
+	@Rule
+	public final TemporaryFolder tmp = new TemporaryFolder();
+
+	@Override
+	public FileSystem getFileSystem() throws Exception {
+		return LocalFileSystem.getSharedInstance();
+	}
+
+	@Override
+	public Path getBasePath() throws Exception {
+		return new Path(tmp.newFolder().toURI());
+	}
+
+	@Override
+	public FileSystemKind getFileSystemKind() {
+		return FileSystemKind.FILE_SYSTEM;
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-hadoop-fs/pom.xml
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-hadoop-fs/pom.xml b/flink-filesystems/flink-hadoop-fs/pom.xml
index 3085e63..0e7fae3 100644
--- a/flink-filesystems/flink-hadoop-fs/pom.xml
+++ b/flink-filesystems/flink-hadoop-fs/pom.xml
@@ -57,6 +57,32 @@ under the License.
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
+
+		<!-- for the behavior test suite -->
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-core</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+			<type>test-jar</type>
+		</dependency>
+
+		<!-- for the HDFS mini cluster test suite -->
+		<dependency>
+			<groupId>org.apache.hadoop</groupId>
+			<artifactId>hadoop-hdfs</artifactId>
+			<version>${hadoop.version}</version>
+			<scope>test</scope>
+			<type>test-jar</type>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.hadoop</groupId>
+			<artifactId>hadoop-common</artifactId>
+			<scope>test</scope>
+			<type>test-jar</type>
+			<version>${hadoop.version}</version><!--$NO-MVN-MAN-VER$-->
+		</dependency>
+
 	</dependencies>
 
 </project>

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopLocalFileSystemBehaviorTest.java
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopLocalFileSystemBehaviorTest.java b/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopLocalFileSystemBehaviorTest.java
new file mode 100644
index 0000000..644744c
--- /dev/null
+++ b/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HadoopLocalFileSystemBehaviorTest.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.runtime.fs.hdfs;
+
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.FileSystemBehaviorTestSuite;
+import org.apache.flink.core.fs.FileSystemKind;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.core.fs.local.LocalFileSystem;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.util.VersionInfo;
+import org.junit.Assume;
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Behavior tests for Hadoop's local file system (RawLocalFileSystem).
+ */
+public class HadoopLocalFileSystemBehaviorTest extends FileSystemBehaviorTestSuite {
+
+	@Rule
+	public final TemporaryFolder tmp = new TemporaryFolder();
+
+	@Override
+	public FileSystem getFileSystem() throws Exception {
+		org.apache.hadoop.fs.FileSystem fs = new RawLocalFileSystem();
+		fs.initialize(LocalFileSystem.getLocalFsURI(), new Configuration());
+		return new HadoopFileSystem(fs);
+	}
+
+	@Override
+	public Path getBasePath() throws Exception {
+		return new Path(tmp.newFolder().toURI());
+	}
+
+	@Override
+	public FileSystemKind getFileSystemKind() {
+		return FileSystemKind.FILE_SYSTEM;
+	}
+
+	// ------------------------------------------------------------------------
+
+	/**
+	 * This test needs to be skipped for earlier Hadoop versions because those
+	 * have a bug.
+	 */
+	@Override
+	public void testMkdirsFailsForExistingFile() throws Exception {
+		final String versionString = VersionInfo.getVersion();
+		final String prefix = versionString.substring(0, 3);
+		final float version = Float.parseFloat(prefix);
+		Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);
+
+		super.testMkdirsFailsForExistingFile();
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsBehaviorTest.java
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsBehaviorTest.java b/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsBehaviorTest.java
new file mode 100644
index 0000000..ebcc663
--- /dev/null
+++ b/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsBehaviorTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.runtime.fs.hdfs;
+
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.FileSystemBehaviorTestSuite;
+import org.apache.flink.core.fs.FileSystemKind;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.util.OperatingSystem;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+
+/**
+ * Behavior tests for HDFS.
+ */
+public class HdfsBehaviorTest extends FileSystemBehaviorTestSuite {
+
+	@ClassRule
+	public static final TemporaryFolder TMP = new TemporaryFolder();
+
+	private static MiniDFSCluster hdfsCluster;
+
+	private static FileSystem fs;
+
+	private static Path basePath;
+
+	// ------------------------------------------------------------------------
+
+	@BeforeClass
+	public static void verifyOS() {
+		Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());
+	}
+
+	@BeforeClass
+	public static void createHDFS() throws Exception {
+		final File baseDir = TMP.newFolder();
+
+		Configuration hdConf = new Configuration();
+		hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
+		MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
+		hdfsCluster = builder.build();
+
+		org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();
+		fs = new HadoopFileSystem(hdfs);
+
+		basePath = new Path(hdfs.getUri().toString() + "/tests");
+	}
+
+	@AfterClass
+	public static void destroyHDFS() throws Exception {
+		if (hdfsCluster != null) {
+			hdfsCluster.getFileSystem().delete(new org.apache.hadoop.fs.Path(basePath.toUri()), true);
+			hdfsCluster.shutdown();
+		}
+	}
+
+	// ------------------------------------------------------------------------
+
+	@Override
+	public FileSystem getFileSystem() {
+		return fs;
+	}
+
+	@Override
+	public Path getBasePath() {
+		return basePath;
+	}
+
+	@Override
+	public FileSystemKind getFileSystemKind() {
+		// this test tests only HDFS, so it should be a file system
+		return FileSystemKind.FILE_SYSTEM;
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java b/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java
new file mode 100644
index 0000000..69ecdb8
--- /dev/null
+++ b/flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.runtime.fs.hdfs;
+
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.FileSystemKind;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.util.TestLogger;
+
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests for extracting the {@link FileSystemKind} from file systems that Flink
+ * accesses through Hadoop's File System interface.
+ *
+ * <p>This class needs to be in this package, because it accesses package private methods
+ * from the HDFS file system wrapper class.
+ */
+public class HdfsKindTest extends TestLogger {
+
+	@Test
+	public void testHdfsKind() throws IOException {
+		final FileSystem fs = new Path("hdfs://localhost:55445/my/file").getFileSystem();
+		assertEquals(FileSystemKind.FILE_SYSTEM, fs.getKind());
+	}
+
+	@Test
+	public void testS3Kind() throws IOException {
+		try {
+			Class.forName("org.apache.hadoop.fs.s3.S3FileSystem");
+		} catch (ClassNotFoundException ignored) {
+			// not in the classpath, cannot run this test
+			log.info("Skipping test 'testS3Kind()' because the S3 file system is not in the class path");
+			return;
+		}
+
+		final FileSystem s3 = new Path("s3://myId:mySecret@bucket/some/bucket/some/object").getFileSystem();
+		assertEquals(FileSystemKind.OBJECT_STORE, s3.getKind());
+	}
+
+	@Test
+	public void testS3nKind() throws IOException {
+		try {
+			Class.forName("org.apache.hadoop.fs.s3native.NativeS3FileSystem");
+		} catch (ClassNotFoundException ignored) {
+			// not in the classpath, cannot run this test
+			log.info("Skipping test 'testS3nKind()' because the Native S3 file system is not in the class path");
+			return;
+		}
+
+		final FileSystem s3n = new Path("s3n://myId:mySecret@bucket/some/bucket/some/object").getFileSystem();
+		assertEquals(FileSystemKind.OBJECT_STORE, s3n.getKind());
+	}
+
+	@Test
+	public void testS3aKind() throws IOException {
+		try {
+			Class.forName("org.apache.hadoop.fs.s3a.S3AFileSystem");
+		} catch (ClassNotFoundException ignored) {
+			// not in the classpath, cannot run this test
+			log.info("Skipping test 'testS3aKind()' because the S3AFileSystem is not in the class path");
+			return;
+		}
+
+		final FileSystem s3a = new Path("s3a://myId:mySecret@bucket/some/bucket/some/object").getFileSystem();
+		assertEquals(FileSystemKind.OBJECT_STORE, s3a.getKind());
+	}
+
+	@Test
+	public void testS3fileSystemSchemes() {
+		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("s3"));
+		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("s3n"));
+		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("s3a"));
+		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("EMRFS"));
+	}
+
+	@Test
+	public void testViewFs() {
+		assertEquals(FileSystemKind.FILE_SYSTEM, HadoopFileSystem.getKindForScheme("viewfs"));
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-s3-fs-hadoop/pom.xml
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-s3-fs-hadoop/pom.xml b/flink-filesystems/flink-s3-fs-hadoop/pom.xml
index 093ee08..76d43f8 100644
--- a/flink-filesystems/flink-s3-fs-hadoop/pom.xml
+++ b/flink-filesystems/flink-s3-fs-hadoop/pom.xml
@@ -183,6 +183,15 @@ under the License.
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
+
+		<!-- for the behavior test suite -->
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-core</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+			<type>test-jar</type>
+		</dependency>
 	</dependencies>
 
 	<!-- We need to bump the AWS dependencies compared to the ones referenced

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemBehaviorITCase.java
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemBehaviorITCase.java b/flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemBehaviorITCase.java
new file mode 100644
index 0000000..c8aaaee
--- /dev/null
+++ b/flink-filesystems/flink-s3-fs-hadoop/src/test/java/org/apache/flink/fs/s3hadoop/HadoopS3FileSystemBehaviorITCase.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.fs.s3hadoop;
+
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.FileSystemBehaviorTestSuite;
+import org.apache.flink.core.fs.FileSystemKind;
+import org.apache.flink.core.fs.Path;
+
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.UUID;
+
+/**
+ * An implementation of the {@link FileSystemBehaviorTestSuite} for the s3a-based S3 file system.
+ */
+public class HadoopS3FileSystemBehaviorITCase extends FileSystemBehaviorTestSuite {
+
+	private static final String BUCKET = System.getenv("ARTIFACTS_AWS_BUCKET");
+
+	private static final String TEST_DATA_DIR = "tests-" + UUID.randomUUID();
+
+	private static final String ACCESS_KEY = System.getenv("ARTIFACTS_AWS_ACCESS_KEY");
+	private static final String SECRET_KEY = System.getenv("ARTIFACTS_AWS_SECRET_KEY");
+
+	@BeforeClass
+	public static void checkCredentialsAndSetup() throws IOException {
+		// check whether credentials exist
+		Assume.assumeTrue("AWS S3 bucket not configured, skipping test...", BUCKET != null);
+		Assume.assumeTrue("AWS S3 access key not configured, skipping test...", ACCESS_KEY != null);
+		Assume.assumeTrue("AWS S3 secret key not configured, skipping test...", SECRET_KEY != null);
+
+		// initialize configuration with valid credentials
+		final Configuration conf = new Configuration();
+		conf.setString("s3.access.key", ACCESS_KEY);
+		conf.setString("s3.secret.key", SECRET_KEY);
+		FileSystem.initialize(conf);
+	}
+
+	@AfterClass
+	public static void clearFsConfig() throws IOException {
+		FileSystem.initialize(new Configuration());
+	}
+
+	@Override
+	public FileSystem getFileSystem() throws Exception {
+		return getBasePath().getFileSystem();
+	}
+
+	@Override
+	public Path getBasePath() throws Exception {
+		return new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR);
+	}
+
+	@Override
+	public FileSystemKind getFileSystemKind() {
+		return FileSystemKind.OBJECT_STORE;
+	}
+}
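
The @BeforeClass guard in this new ITCase is the standard JUnit 4 pattern for environment-dependent tests: a violated Assume marks the whole class as skipped rather than failed. A stripped-down illustration with a hypothetical MY_ENV variable (not part of the commit):

import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;

public class EnvGuardedTest {

	// hypothetical environment variable, for illustration only
	private static final String VALUE = System.getenv("MY_ENV");

	@BeforeClass
	public static void checkEnvironment() {
		// a violated assumption here makes JUnit report every test in the class as ignored
		Assume.assumeTrue("MY_ENV not set, skipping test...", VALUE != null);
	}

	@Test
	public void testAgainstTheEnvironment() {
		// the test body would use VALUE here
	}
}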

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-s3-fs-presto/pom.xml
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-s3-fs-presto/pom.xml b/flink-filesystems/flink-s3-fs-presto/pom.xml
index b871032..4bfc2f1 100644
--- a/flink-filesystems/flink-s3-fs-presto/pom.xml
+++ b/flink-filesystems/flink-s3-fs-presto/pom.xml
@@ -206,6 +206,15 @@ under the License.
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
+
+		<!-- for the behavior test suite -->
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-core</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+			<type>test-jar</type>
+		</dependency>
 	</dependencies>
 
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-filesystems/flink-s3-fs-presto/src/test/java/org/apache/flink/fs/s3presto/PrestoS3FileSystemBehaviorITCase.java
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-s3-fs-presto/src/test/java/org/apache/flink/fs/s3presto/PrestoS3FileSystemBehaviorITCase.java b/flink-filesystems/flink-s3-fs-presto/src/test/java/org/apache/flink/fs/s3presto/PrestoS3FileSystemBehaviorITCase.java
new file mode 100644
index 0000000..812404c
--- /dev/null
+++ b/flink-filesystems/flink-s3-fs-presto/src/test/java/org/apache/flink/fs/s3presto/PrestoS3FileSystemBehaviorITCase.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.fs.s3presto;
+
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.FileSystemBehaviorTestSuite;
+import org.apache.flink.core.fs.FileSystemKind;
+import org.apache.flink.core.fs.Path;
+
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.UUID;
+
+/**
+ * An implementation of the {@link FileSystemBehaviorTestSuite} for the Presto-based S3 file system.
+ */
+public class PrestoS3FileSystemBehaviorITCase extends FileSystemBehaviorTestSuite {
+
+	private static final String BUCKET = System.getenv("ARTIFACTS_AWS_BUCKET");
+
+	private static final String TEST_DATA_DIR = "tests-" + UUID.randomUUID();
+
+	private static final String ACCESS_KEY = System.getenv("ARTIFACTS_AWS_ACCESS_KEY");
+	private static final String SECRET_KEY = System.getenv("ARTIFACTS_AWS_SECRET_KEY");
+
+	@BeforeClass
+	public static void checkCredentialsAndSetup() throws IOException {
+		// check whether credentials exist
+		Assume.assumeTrue("AWS S3 bucket not configured, skipping test...", BUCKET != null);
+		Assume.assumeTrue("AWS S3 access key not configured, skipping test...", ACCESS_KEY != null);
+		Assume.assumeTrue("AWS S3 secret key not configured, skipping test...", SECRET_KEY != null);
+
+		// initialize configuration with valid credentials
+		final Configuration conf = new Configuration();
+		conf.setString("s3.access.key", ACCESS_KEY);
+		conf.setString("s3.secret.key", SECRET_KEY);
+		FileSystem.initialize(conf);
+	}
+
+	@AfterClass
+	public static void clearFsConfig() throws IOException {
+		FileSystem.initialize(new Configuration());
+	}
+
+	@Override
+	public FileSystem getFileSystem() throws Exception {
+		return getBasePath().getFileSystem();
+	}
+
+	@Override
+	public Path getBasePath() throws Exception {
+		return new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR);
+	}
+
+	@Override
+	public FileSystemKind getFileSystemKind() {
+		return FileSystemKind.OBJECT_STORE;
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/3d0ed12e/flink-fs-tests/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java
----------------------------------------------------------------------
diff --git a/flink-fs-tests/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java b/flink-fs-tests/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java
deleted file mode 100644
index 69ecdb8..0000000
--- a/flink-fs-tests/src/test/java/org/apache/flink/runtime/fs/hdfs/HdfsKindTest.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.runtime.fs.hdfs;
-
-import org.apache.flink.core.fs.FileSystem;
-import org.apache.flink.core.fs.FileSystemKind;
-import org.apache.flink.core.fs.Path;
-import org.apache.flink.util.TestLogger;
-
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Tests for extracting the {@link FileSystemKind} from file systems that Flink
- * accesses through Hadoop's File System interface.
- *
- * <p>This class needs to be in this package, because it accesses package private methods
- * from the HDFS file system wrapper class.
- */
-public class HdfsKindTest extends TestLogger {
-
-	@Test
-	public void testHdfsKind() throws IOException {
-		final FileSystem fs = new Path("hdfs://localhost:55445/my/file").getFileSystem();
-		assertEquals(FileSystemKind.FILE_SYSTEM, fs.getKind());
-	}
-
-	@Test
-	public void testS3Kind() throws IOException {
-		try {
-			Class.forName("org.apache.hadoop.fs.s3.S3FileSystem");
-		} catch (ClassNotFoundException ignored) {
-			// not in the classpath, cannot run this test
-			log.info("Skipping test 'testS3Kind()' because the S3 file system is not in the class path");
-			return;
-		}
-
-		final FileSystem s3 = new Path("s3://myId:mySecret@bucket/some/bucket/some/object").getFileSystem();
-		assertEquals(FileSystemKind.OBJECT_STORE, s3.getKind());
-	}
-
-	@Test
-	public void testS3nKind() throws IOException {
-		try {
-			Class.forName("org.apache.hadoop.fs.s3native.NativeS3FileSystem");
-		} catch (ClassNotFoundException ignored) {
-			// not in the classpath, cannot run this test
-			log.info("Skipping test 'testS3nKind()' because the Native S3 file system is not in the class path");
-			return;
-		}
-
-		final FileSystem s3n = new Path("s3n://myId:mySecret@bucket/some/bucket/some/object").getFileSystem();
-		assertEquals(FileSystemKind.OBJECT_STORE, s3n.getKind());
-	}
-
-	@Test
-	public void testS3aKind() throws IOException {
-		try {
-			Class.forName("org.apache.hadoop.fs.s3a.S3AFileSystem");
-		} catch (ClassNotFoundException ignored) {
-			// not in the classpath, cannot run this test
-			log.info("Skipping test 'testS3aKind()' because the S3AFileSystem is not in the class path");
-			return;
-		}
-
-		final FileSystem s3a = new Path("s3a://myId:mySecret@bucket/some/bucket/some/object").getFileSystem();
-		assertEquals(FileSystemKind.OBJECT_STORE, s3a.getKind());
-	}
-
-	@Test
-	public void testS3fileSystemSchemes() {
-		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("s3"));
-		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("s3n"));
-		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("s3a"));
-		assertEquals(FileSystemKind.OBJECT_STORE, HadoopFileSystem.getKindForScheme("EMRFS"));
-	}
-
-	@Test
-	public void testViewFs() {
-		assertEquals(FileSystemKind.FILE_SYSTEM, HadoopFileSystem.getKindForScheme("viewfs"));
-	}
-}


[11/19] flink git commit: [hotfix] Fix typo in TestableKinesisDataFetcher

Posted by se...@apache.org.
[hotfix] Fix typo in TestableKinesisDataFetcher

This closes #5178


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/87749b93
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/87749b93
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/87749b93

Branch: refs/heads/master
Commit: 87749b93c9b05574737cd96b4a37ed1b71a74031
Parents: 1eaef6a
Author: Cristian <me...@cristian.io>
Authored: Mon Dec 18 15:56:03 2017 -0800
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 .../kinesis/testutils/TestableKinesisDataFetcher.java     | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/87749b93/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
index d111546..5d76262 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
@@ -53,7 +53,7 @@ public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 			List<String> fakeStreams,
 			Properties fakeConfiguration,
 			int fakeTotalCountOfSubtasks,
-			int fakeTndexOfThisSubtask,
+			int fakeIndexOfThisSubtask,
 			AtomicReference<Throwable> thrownErrorUnderTest,
 			LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
 			HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
@@ -61,7 +61,7 @@ public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 		super(fakeStreams,
 			getMockedSourceContext(),
 			fakeCheckpointLock,
-			getMockedRuntimeContext(fakeTotalCountOfSubtasks, fakeTndexOfThisSubtask),
+			getMockedRuntimeContext(fakeTotalCountOfSubtasks, fakeIndexOfThisSubtask),
 			fakeConfiguration,
 			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
 			thrownErrorUnderTest,
@@ -105,7 +105,7 @@ public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 		return Mockito.mock(SourceFunction.SourceContext.class);
 	}
 
-	private static RuntimeContext getMockedRuntimeContext(final int fakeTotalCountOfSubtasks, final int fakeTndexOfThisSubtask) {
+	private static RuntimeContext getMockedRuntimeContext(final int fakeTotalCountOfSubtasks, final int fakeIndexOfThisSubtask) {
 		RuntimeContext mockedRuntimeContext = Mockito.mock(RuntimeContext.class);
 
 		Mockito.when(mockedRuntimeContext.getNumberOfParallelSubtasks()).thenAnswer(new Answer<Integer>() {
@@ -118,7 +118,7 @@ public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 		Mockito.when(mockedRuntimeContext.getIndexOfThisSubtask()).thenAnswer(new Answer<Integer>() {
 			@Override
 			public Integer answer(InvocationOnMock invocationOnMock) throws Throwable {
-				return fakeTndexOfThisSubtask;
+				return fakeIndexOfThisSubtask;
 			}
 		});
 
@@ -132,7 +132,7 @@ public class TestableKinesisDataFetcher extends KinesisDataFetcher<String> {
 		Mockito.when(mockedRuntimeContext.getTaskNameWithSubtasks()).thenAnswer(new Answer<String>() {
 			@Override
 			public String answer(InvocationOnMock invocationOnMock) throws Throwable {
-				return "Fake Task (" + fakeTndexOfThisSubtask + "/" + fakeTotalCountOfSubtasks + ")";
+				return "Fake Task (" + fakeIndexOfThisSubtask + "/" + fakeTotalCountOfSubtasks + ")";
 			}
 		});
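
A side note on the mocking style touched by this rename: each anonymous Answer above returns a constant, so the same stubbing can be written in one line per call with the Mockito API already in use here; a sketch, not part of the commit:

RuntimeContext mockedRuntimeContext = Mockito.mock(RuntimeContext.class);

// equivalent to the anonymous Answer classes, since every answer is a constant
Mockito.when(mockedRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(fakeTotalCountOfSubtasks);
Mockito.when(mockedRuntimeContext.getIndexOfThisSubtask()).thenReturn(fakeIndexOfThisSubtask);
Mockito.when(mockedRuntimeContext.getTaskNameWithSubtasks())
		.thenReturn("Fake Task (" + fakeIndexOfThisSubtask + "/" + fakeTotalCountOfSubtasks + ")");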
 


[13/19] flink git commit: [FLINK-8346][docs] add v4 signature workaround for manual S3 setups

Posted by se...@apache.org.
[FLINK-8346][docs] add v4 signature workaround for manual S3 setups

This closes #5231


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fd13ed09
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fd13ed09
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fd13ed09

Branch: refs/heads/master
Commit: fd13ed09d4dfeea04be3acb7856fe97ac4ae6c32
Parents: 2c57cc0
Author: Nico Kruber <ni...@data-artisans.com>
Authored: Tue Jan 2 14:06:18 2018 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 docs/ops/deployment/aws.md | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/fd13ed09/docs/ops/deployment/aws.md
----------------------------------------------------------------------
diff --git a/docs/ops/deployment/aws.md b/docs/ops/deployment/aws.md
index d9d2647..7ef95e7 100644
--- a/docs/ops/deployment/aws.md
+++ b/docs/ops/deployment/aws.md
@@ -397,15 +397,29 @@ Caused by: java.lang.ClassNotFoundException: Class org.apache.hadoop.fs.s3native
 
 If you have configured everything properly, but get a `Bad Request` Exception **and** your S3 bucket is located in region `eu-central-1`, you might be running an S3 client, which does not support [Amazon's signature version 4](http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
 
-Currently, this includes all Hadoop versions up to 2.7.2 running `NativeS3FileSystem`, which depend on `JetS3t 0.9.0` instead of a version [>= 0.9.4](http://www.jets3t.org/RELEASE_NOTES.html).
-
-The only workaround is to change the bucket region.
-
 ```
 [...]
 Caused by: java.io.IOException: s3://<bucket-in-eu-central-1>/<endpoint> : 400 : Bad Request [...]
 Caused by: org.jets3t.service.impl.rest.HttpException [...]
 ```
+or
+```
+com.amazonaws.services.s3.model.AmazonS3Exception: Status Code: 400, AWS Service: Amazon S3, AWS Request ID: [...], AWS Error Code: null, AWS Error Message: Bad Request, S3 Extended Request ID: [...]
+```
+
+This should not apply to our shaded Hadoop/Presto S3 file systems but can occur for Hadoop-provided
+S3 file systems. In particular, all Hadoop versions up to 2.7.2 running `NativeS3FileSystem` (which
+depend on `JetS3t 0.9.0` instead of a version [>= 0.9.4](http://www.jets3t.org/RELEASE_NOTES.html))
+are affected, but users have also reported this happening with the `S3AFileSystem`.
+
+As an alternative to changing the bucket region, you may be able to solve this by
+[requesting signature version 4 for request authentication](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version),
+e.g. by adding this to Flink's JVM options in `flink-conf.yaml` (see
+[configuration](../config.html#common-options)):
+```
+env.java.opts: -Dcom.amazonaws.services.s3.enableV4
+```
 
 {% top %}
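
The flink-conf.yaml option added above only sets a JVM-wide system property that the AWS SDK reads at startup. For code that constructs its own S3 clients, the same switch can be set programmatically through the AWS SDK 1.x constant; a minimal sketch, assuming it runs before the first S3 client is created:

import com.amazonaws.SDKGlobalConfiguration;

public class EnableV4Signing {

	public static void main(String[] args) {
		// same effect as -Dcom.amazonaws.services.s3.enableV4 in env.java.opts;
		// must be set before the first Amazon S3 client is constructed
		System.setProperty(SDKGlobalConfiguration.ENABLE_S3_SIGV4_SYSTEM_PROPERTY, "true");
	}
}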
 


[10/19] flink git commit: [FLINK-8359] [docs] Update copyright date in NOTICE

Posted by se...@apache.org.
[FLINK-8359] [docs] Update copyright date in NOTICE

This closes #5238


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/1eaef6ab
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/1eaef6ab
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/1eaef6ab

Branch: refs/heads/master
Commit: 1eaef6abf4839194a12b19a038a1ec480a037783
Parents: 3bc293e
Author: yew1eb <ye...@gmail.com>
Authored: Thu Jan 4 20:13:46 2018 +0800
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 NOTICE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/1eaef6ab/NOTICE
----------------------------------------------------------------------
diff --git a/NOTICE b/NOTICE
index cbd68c9..da67d0e 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
 Apache Flink
-Copyright 2014-2017 The Apache Software Foundation
+Copyright 2014-2018 The Apache Software Foundation
 
 This product includes software developed at
 The Apache Software Foundation (http://www.apache.org/).


[14/19] flink git commit: [hotfix] [misc] Fix some typos

Posted by se...@apache.org.
[hotfix] [misc] Fix some typos

This closes #5204


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2c57cc0b
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2c57cc0b
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2c57cc0b

Branch: refs/heads/master
Commit: 2c57cc0bc9a4f46d343ad566f2b8b4b8da99d58d
Parents: 87749b9
Author: 王振涛 <wa...@zhiweicloud.com>
Authored: Mon Dec 18 12:15:25 2017 +0800
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 .../main/java/org/apache/flink/client/program/ClusterClient.java  | 2 +-
 .../org/apache/flink/api/common/io/ReplicatingInputFormat.java    | 2 +-
 .../org/apache/flink/api/common/operators/DualInputOperator.java  | 2 +-
 .../flink/api/common/operators/base/GroupReduceOperatorBase.java  | 3 +--
 4 files changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/2c57cc0b/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
----------------------------------------------------------------------
diff --git a/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java b/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
index 5b30223..3515363 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
@@ -527,7 +527,7 @@ public abstract class ClusterClient {
 	}
 
 	/**
-	 * Reattaches to a running from from the supplied job id.
+	 * Reattaches to a running job from the supplied job id.
 	 * @param jobID The job id of the job to attach to
 	 * @return The JobExecutionResult for the jobID
 	 * @throws JobExecutionException if an error occurs during monitoring the job execution

http://git-wip-us.apache.org/repos/asf/flink/blob/2c57cc0b/flink-core/src/main/java/org/apache/flink/api/common/io/ReplicatingInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/io/ReplicatingInputFormat.java b/flink-core/src/main/java/org/apache/flink/api/common/io/ReplicatingInputFormat.java
index c8aa591..3e64cc4 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/io/ReplicatingInputFormat.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/io/ReplicatingInputFormat.java
@@ -38,7 +38,7 @@ import java.io.IOException;
  * Replicated data can only be used as input for a {@link InnerJoinOperatorBase} or
  * {@link org.apache.flink.api.common.operators.base.CrossOperatorBase} with the same parallelism as the DataSource.
  * Before being used as an input to a Join or Cross operator, replicated data might be processed in local pipelines by
- * by Map-based operators with the same parallelism as the source. Map-based operators are
+ * Map-based operators with the same parallelism as the source. Map-based operators are
  * {@link org.apache.flink.api.common.operators.base.MapOperatorBase},
  * {@link org.apache.flink.api.common.operators.base.FlatMapOperatorBase},
  * {@link org.apache.flink.api.common.operators.base.FilterOperatorBase}, and
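
Since the corrected javadoc describes the usage constraints of ReplicatingInputFormat without showing them, here is a hedged DataSet API sketch (the paths and the join keys are made up for illustration; the point is that the join must run with the same parallelism as the replicated source):

import org.apache.flink.api.common.io.ReplicatingInputFormat;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;

public class ReplicatedJoinSketch {

	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		// every parallel source task reads the entire (small) input instead of one split,
		// so the join below needs no network shuffle for this side
		DataSet<String> replicated = env.createInput(
				new ReplicatingInputFormat<>(new TextInputFormat(new Path("hdfs:///small-input"))),
				BasicTypeInfo.STRING_TYPE_INFO);

		DataSet<String> big = env.readTextFile("hdfs:///big-input");

		// joining on the whole line just keeps the sketch self-contained
		replicated.join(big)
				.where(line -> line)
				.equalTo(line -> line)
				.print();
	}
}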

http://git-wip-us.apache.org/repos/asf/flink/blob/2c57cc0b/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java b/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
index d21160e..fac7a10 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/operators/DualInputOperator.java
@@ -29,7 +29,7 @@ import org.apache.flink.api.common.operators.util.UserCodeWrapper;
 import org.apache.flink.util.Visitor;
 
 /**
- * Abstract operator superclass for for all operators that have two inputs, like "Join", "CoGroup", or "Cross".
+ * Abstract operator superclass for all operators that have two inputs, like "Join", "CoGroup", or "Cross".
  *
  * @param <IN1> First input type of the user function
  * @param <IN2> Second input type of the user function

http://git-wip-us.apache.org/repos/asf/flink/blob/2c57cc0b/flink-core/src/main/java/org/apache/flink/api/common/operators/base/GroupReduceOperatorBase.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/operators/base/GroupReduceOperatorBase.java b/flink-core/src/main/java/org/apache/flink/api/common/operators/base/GroupReduceOperatorBase.java
index 0794a77..595fb30 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/operators/base/GroupReduceOperatorBase.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/operators/base/GroupReduceOperatorBase.java
@@ -198,8 +198,8 @@ public class GroupReduceOperatorBase<IN, OUT, FT extends GroupReduceFunction<IN,
 		ArrayList<OUT> result = new ArrayList<OUT>();
 
 		if (inputData.size() > 0) {
+			final TypeSerializer<IN> inputSerializer = inputType.createSerializer(executionConfig);
 			if (keyColumns.length == 0) {
-				final TypeSerializer<IN> inputSerializer = inputType.createSerializer(executionConfig);
 				TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
 				List<IN> inputDataCopy = new ArrayList<IN>(inputData.size());
 				for (IN in : inputData) {
@@ -209,7 +209,6 @@ public class GroupReduceOperatorBase<IN, OUT, FT extends GroupReduceFunction<IN,
 
 				function.reduce(inputDataCopy, collector);
 			} else {
-				final TypeSerializer<IN> inputSerializer = inputType.createSerializer(executionConfig);
 				boolean[] keyOrderings = new boolean[keyColumns.length];
 				final TypeComparator<IN> comparator = getTypeComparator(inputType, keyColumns, keyOrderings, executionConfig);
 


[02/19] flink git commit: [hotfix] Fix many many typos

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java b/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java
index 16f162d..86822de 100644
--- a/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/flink-filesystems/flink-s3-fs-hadoop/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -19,6 +19,43 @@
 package org.apache.hadoop.conf;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import org.apache.commons.collections.map.UnmodifiableMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringInterner;
+import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerator;
+import org.w3c.dom.DOMException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
 
 import java.io.BufferedInputStream;
 import java.io.DataInput;
@@ -56,51 +93,12 @@ import java.util.StringTokenizer;
 import java.util.WeakHashMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-
-import com.google.common.base.Charsets;
-import org.apache.commons.collections.map.UnmodifiableMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.alias.CredentialProvider;
-import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
-import org.apache.hadoop.security.alias.CredentialProviderFactory;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StringInterner;
-import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
-import org.w3c.dom.DOMException;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-import com.google.common.base.Preconditions;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
 
 /** 
  * Provides access to configuration parameters.
@@ -1937,7 +1935,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * Get the value for a known password configuration element.
    * In order to enable the elimination of clear text passwords in config,
    * this method attempts to resolve the property name as an alias through
-   * the CredentialProvider API and conditionally fallsback to config.
+   * the CredentialProvider API and conditionally falls back to config.
    * @param name property name
    * @return password
    */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java
----------------------------------------------------------------------
diff --git a/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java b/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java
index 8939c5a..99c569c 100644
--- a/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java
+++ b/flink-java/src/test/java/org/apache/flink/api/java/io/CsvInputFormatTest.java
@@ -808,7 +808,7 @@ public class CsvInputFormatTest {
 
 	}
 
-	// Test disabled becase we do not support double-quote escaped quotes right now.
+	// Test disabled because we do not support double-quote escaped quotes right now.
 	// @Test
 	public void testParserCorrectness() throws Exception {
 		// RFC 4180 Compliance Test content
@@ -875,13 +875,13 @@ public class CsvInputFormatTest {
 	@Test
 	public void testWindowsLineEndRemoval() {
 
-		//Check typical use case -- linux file is correct and it is set up to linuc(\n)
+		//Check typical use case -- linux file is correct and line endings are set up to linux (\n)
 		this.testRemovingTrailingCR("\n", "\n");
 
 		//Check typical windows case -- windows file endings and file has windows file endings set up
 		this.testRemovingTrailingCR("\r\n", "\r\n");
 
-		//Check problematic case windows file -- windows file endings(\r\n) but linux line endings (\n) set up
+		//Check problematic case windows file -- windows file endings (\r\n) but linux line endings (\n) set up
 		this.testRemovingTrailingCR("\r\n", "\n");
 
 		//Check problematic case linux file -- linux file endings (\n) but windows file endings set up (\r\n)

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java
----------------------------------------------------------------------
diff --git a/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java b/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java
index b207e19..5d00aa2 100644
--- a/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java
+++ b/flink-java/src/test/java/org/apache/flink/api/java/operator/MaxByOperatorTest.java
@@ -69,7 +69,7 @@ public class MaxByOperatorTest {
 	private final List<CustomType> customTypeData = new ArrayList<CustomType>();
 
 	/**
-	 * This test validates that an InvalidProgrammException is thrown when maxBy
+	 * This test validates that an InvalidProgramException is thrown when maxBy
 	 * is used on a custom data type.
 	 */
 	@Test(expected = InvalidProgramException.class)
@@ -86,7 +86,7 @@ public class MaxByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsDataset1() {
@@ -100,7 +100,7 @@ public class MaxByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsDataset2() {
@@ -114,7 +114,7 @@ public class MaxByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsDataset3() {
@@ -147,7 +147,7 @@ public class MaxByOperatorTest {
 	}
 
 	/**
-	 * This test validates that an InvalidProgrammException is thrown when maxBy
+	 * This test validates that an InvalidProgramException is thrown when maxBy
 	 * is used on a custom data type.
 	 */
 	@Test(expected = InvalidProgramException.class)
@@ -164,7 +164,7 @@ public class MaxByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsGrouping1() {
@@ -178,7 +178,7 @@ public class MaxByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsGrouping2() {
@@ -192,7 +192,7 @@ public class MaxByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsGrouping3() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java
----------------------------------------------------------------------
diff --git a/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java b/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java
index 02b84fa..4a77f91 100644
--- a/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java
+++ b/flink-java/src/test/java/org/apache/flink/api/java/operator/MinByOperatorTest.java
@@ -69,7 +69,7 @@ public class MinByOperatorTest {
 	private final List<CustomType> customTypeData = new ArrayList<CustomType>();
 
 	/**
-	 * This test validates that an InvalidProgrammException is thrown when minBy
+	 * This test validates that an InvalidProgramException is thrown when minBy
 	 * is used on a custom data type.
 	 */
 	@Test(expected = InvalidProgramException.class)
@@ -86,7 +86,7 @@ public class MinByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsDataset1() {
@@ -100,7 +100,7 @@ public class MinByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsDataset2() {
@@ -114,7 +114,7 @@ public class MinByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsDataset3() {
@@ -147,7 +147,7 @@ public class MinByOperatorTest {
 	}
 
 	/**
-	 * This test validates that an InvalidProgrammException is thrown when minBy
+	 * This test validates that an InvalidProgramException is thrown when minBy
 	 * is used on a custom data type.
 	 */
 	@Test(expected = InvalidProgramException.class)
@@ -164,7 +164,7 @@ public class MinByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsGrouping1() {
@@ -178,7 +178,7 @@ public class MinByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsGrouping2() {
@@ -192,7 +192,7 @@ public class MinByOperatorTest {
 
 	/**
 	 * This test validates that an index which is out of bounds throws an
-	 * IndexOutOfBOundsExcpetion.
+	 * IndexOutOfBoundsException.
 	 */
 	@Test(expected = IndexOutOfBoundsException.class)
 	public void testOutOfTupleBoundsGrouping3() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java
----------------------------------------------------------------------
diff --git a/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java b/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java
index 0130dec..8f36f66 100644
--- a/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java
+++ b/flink-java8/src/main/java/org/apache/flink/examples/java8/wordcount/WordCount.java
@@ -62,7 +62,7 @@ public class WordCount {
 		DataSet<Tuple2<String, Integer>> counts =
 				// normalize and split each line
 				text.map(line -> line.toLowerCase().split("\\W+"))
-				// convert splitted line in pairs (2-tuples) containing: (word,1)
+				// convert the split line into pairs (2-tuples) containing: (word,1)
 				.flatMap((String[] tokens, Collector<Tuple2<String, Integer>> out) -> {
 					// emit the pairs with non-zero-length words
 					Arrays.stream(tokens)

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
----------------------------------------------------------------------
diff --git a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java b/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
index f991433..b9dba77 100644
--- a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
+++ b/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
@@ -62,7 +62,7 @@ public class WordCount {
 		DataStream<Tuple2<String, Integer>> counts =
 				// normalize and split each line
 				text.map(line -> line.toLowerCase().split("\\W+"))
-				// convert splitted line in pairs (2-tuples) containing: (word,1)
+				// convert the split line into pairs (2-tuples) containing: (word,1)
 				.flatMap((String[] tokens, Collector<Tuple2<String, Integer>> out) -> {
 					// emit the pairs with non-zero-length words
 					Arrays.stream(tokens)

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java
----------------------------------------------------------------------
diff --git a/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java b/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java
index 345b119..6ad1058 100644
--- a/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java
+++ b/flink-java8/src/test/java/org/apache/flink/test/api/java/operators/lambdas/FilterITCase.java
@@ -28,7 +28,7 @@ import java.util.Collections;
 import java.util.List;
 
 /**
- * IT cases for lambda filter funtions.
+ * IT cases for lambda filter functions.
  */
 public class FilterITCase extends JavaProgramTestBase {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java b/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java
index 2a12d37..7721653 100644
--- a/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java
+++ b/flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/nfa/NFATest.java
@@ -133,7 +133,7 @@ public class NFATest extends TestLogger {
 
 	/**
 	 * Tests that elements whose timestamp difference is exactly the window length are not matched.
-	 * The reaon is that the right window side (later elements) is exclusive.
+	 * The reason is that the right window side (later elements) is exclusive.
 	 */
 	@Test
 	public void testWindowBorders() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java
index ead29fc..c1a8999 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/generator/random/RandomGenerableFactory.java
@@ -46,7 +46,7 @@ import java.util.List;
 public interface RandomGenerableFactory<T extends RandomGenerator> {
 
 	/**
-	 * The amount of work ({@code elementCount * cyclerPerElement}) is used to
+	 * The amount of work ({@code elementCount * cyclesPerElement}) is used to
 	 * generate a list of blocks of work of near-equal size.
 	 *
 	 * @param elementCount number of elements, as indexed in the {@code BlockInfo}

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala
index 721dd69..6d78ef7 100644
--- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala
+++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/classification/SVM.scala
@@ -35,7 +35,7 @@ import breeze.linalg.{DenseVector => BreezeDenseVector, Vector => BreezeVector}
 /** Implements a soft-margin SVM using the communication-efficient distributed dual coordinate
   * ascent algorithm (CoCoA) with hinge-loss function.
   *
-  * It can be used for binary classification problems, with the labels set as +1.0 to indiciate a
+  * It can be used for binary classification problems, with the labels set as +1.0 to indicate a
   * positive example and -1.0 to indicate a negative example.
   *
   * The algorithm solves the following minimization problem:

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala
index 2c04bb0..ee82c03 100644
--- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala
+++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala
@@ -154,7 +154,7 @@ object StochasticOutlierSelection extends WithParameters {
     new TransformDataSetOperation[StochasticOutlierSelection, LabeledVector, (Int, Double)] {
 
 
-      /** Overrides the method of the parent class and applies the sochastic outlier selection
+      /** Overrides the method of the parent class and applies the stochastic outlier selection
         * algorithm.
         *
         * @param instance Instance of the class
@@ -181,7 +181,7 @@ object StochasticOutlierSelection extends WithParameters {
   }
 
   /** [[TransformDataSetOperation]] applies the stochastic outlier selection algorithm on a
-    * [[Vector]] which will transform the high-dimensionaly input to a single Double output.
+    * [[Vector]] which will transform the high-dimensional input to a single Double output.
     *
     * @tparam T Type of the input and output data which has to be a subtype of [[Vector]]
    * @return [[TransformDataSetOperation]] a single double which represents the outlierness of

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala
index dbe0782..ca7cb33 100644
--- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala
+++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Estimator.scala
@@ -117,7 +117,7 @@ object Estimator{
   }
 
   /** Fallback [[TransformDataSetOperation]] for [[Transformer]] which do not support the input or
-    * output type with which they are called. This is usualy the case if pipeline operators are
+    * output type with which they are called. This is usually the case if pipeline operators are
     * chained which have incompatible input/output types. In order to detect these failures, the
     * fallback [[TransformDataSetOperation]] throws a [[RuntimeException]] with the corresponding
     * input/output types. Consequently, a wrong pipeline will be detected at pre-flight phase of

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala
index 9d11cff..d0f3064 100644
--- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala
+++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Predictor.scala
@@ -230,7 +230,7 @@ trait PredictOperation[Instance, Model, Testing, Prediction] extends Serializabl
   /** Calculates the prediction for a single element given the model of the [[Predictor]].
     *
     * @param value The unlabeled example on which we make the prediction
-    * @param model The model representation of the prediciton algorithm
+    * @param model The model representation of the prediction algorithm
     * @return A label for the provided example of type [[Prediction]]
     */
   def predict(value: Testing, model: Model):

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala
index 014ad2b..4b44127 100644
--- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala
+++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/pipeline/Transformer.scala
@@ -48,7 +48,7 @@ trait Transformer[Self <: Transformer[Self]]
   with Serializable {
   that: Self =>
 
-  /** Transform operation which transforms an input [[DataSet]] of type I into an ouptut [[DataSet]]
+  /** Transform operation which transforms an input [[DataSet]] of type I into an output [[DataSet]]
     * of type O. The actual transform operation is implemented within the
     * [[TransformDataSetOperation]].
     *
@@ -57,7 +57,7 @@ trait Transformer[Self <: Transformer[Self]]
     * @param transformOperation [[TransformDataSetOperation]] which encapsulates the algorithm's
     *                          logic
     * @tparam Input Input data type
-    * @tparam Output Ouptut data type
+    * @tparam Output Output data type
     * @return
     */
   def transform[Input, Output](
@@ -125,7 +125,7 @@ object Transformer{
   * @tparam Instance Type of the [[Transformer]] for which the [[TransformDataSetOperation]] is
   *                  defined
   * @tparam Input Input data type
-  * @tparam Output Ouptut data type
+  * @tparam Output Output data type
   */
 trait TransformDataSetOperation[Instance, Input, Output] extends Serializable{
   def transformDataSet(
@@ -148,10 +148,10 @@ trait TransformOperation[Instance, Model, Input, Output] extends Serializable{
   /** Retrieves the model of the [[Transformer]] for which this operation has been defined.
     *
     * @param instance
-    * @param transformParemters
+    * @param transformParameters
     * @return
     */
-  def getModel(instance: Instance, transformParemters: ParameterMap): DataSet[Model]
+  def getModel(instance: Instance, transformParameters: ParameterMap): DataSet[Model]
 
   /** Transforms a single element with respect to the model associated with the respective
     * [[Transformer]]

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala
index 46b1462..3451c80 100644
--- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala
+++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/preprocessing/Splitter.scala
@@ -140,7 +140,7 @@ object Splitter {
    * @param kFolds          The number of TrainTest DataSets to be returns. Each 'testing' will be
    *                        1/k of the dataset, randomly sampled, the training will be the remainder
    *                        of the dataset.  The DataSet is split into kFolds first, so that no
-   *                        observation will occurin in multiple folds.
+   *                        observation will occur in multiple folds.
    * @param seed            Random number generator seed.
    * @return An array of TrainTestDataSets
    */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala
index 0454381..2e2e35a 100644
--- a/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala
+++ b/flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/recommendation/ALS.scala
@@ -40,7 +40,7 @@ import scala.util.Random
 
 /** Alternating least squares algorithm to calculate a matrix factorization.
   *
-  * Given a matrix `R`, ALS calculates two matricess `U` and `V` such that `R ~~ U^TV`. The
+  * Given a matrix `R`, ALS calculates two matrices `U` and `V` such that `R ~~ U^TV`. The
   * unknown row dimension is given by the number of latent factors. Since matrix factorization
   * is often used in the context of recommendation, we'll call the first matrix the user and the
   * second matrix the item matrix. The `i`th column of the user matrix is `u_i` and the `i`th

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties b/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties
index d548f48..86a48a8 100644
--- a/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties
+++ b/flink-libraries/flink-table/src/main/resources/tableSourceConverter.properties
@@ -18,9 +18,9 @@
 
 ################################################################################
 # The config file is used to specify the packages of current module where
-# to find TableSourceConverter implementation class annotationed with TableType.
+# to find TableSourceConverter implementation class annotated with TableType.
 # If there are multiple packages to scan, put those packages together into a
-# string seperated with ',', for example, org.package1,org.package2.
+# string separated with ',', for example, org.package1,org.package2.
 # Please notice:
 # It's better to have a tableSourceConverter.properties in each connector Module
 # which offers converters instead of put all information into the

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
index 6170fa1..6e00b56 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
@@ -301,7 +301,7 @@ abstract class TableEnvironment(val config: TableConfig) {
       throw new ExternalCatalogAlreadyExistException(name)
     }
     this.externalCatalogs.put(name, externalCatalog)
-    // create an external catalog calicte schema, register it on the root schema
+    // create an external catalog Calcite schema, register it on the root schema
     ExternalCatalogSchema.registerCatalog(rootSchema, name, externalCatalog)
   }
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala
index f4d928f..534ef39 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableSchema.scala
@@ -40,9 +40,9 @@ class TableSchema(
   // check uniqueness of field names
   if (columnNames.toSet.size != columnTypes.length) {
     val duplicateFields = columnNames
-      // count occurences of field names
+      // count occurrences of field names
       .groupBy(identity).mapValues(_.length)
-      // filter for occurences > 1 and map to field name
+      // filter for occurrences > 1 and map to field name
       .filter(g => g._2 > 1).keys
 
     throw new TableException(
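
The duplicate check above is a compact occurrence count; run standalone on hypothetical column names it behaves as follows:

    val columnNames = Seq("a", "b", "a", "c")
    val duplicateFields = columnNames
      .groupBy(identity).mapValues(_.length)   // Map("a" -> 2, "b" -> 1, "c" -> 1)
      .filter(g => g._2 > 1).keys              // yields "a"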

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala
index 3a195ed..99d8cab 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkTypeSystem.scala
@@ -26,11 +26,11 @@ import org.apache.calcite.sql.`type`.SqlTypeName
   */
 class FlinkTypeSystem extends RelDataTypeSystemImpl {
 
-  // we cannot use Int.MaxValue because of an overflow in Calcites type inference logic
+  // we cannot use Int.MaxValue because of an overflow in Calcite's type inference logic
   // half should be enough for all use cases
   override def getMaxNumericScale: Int = Int.MaxValue / 2
 
-  // we cannot use Int.MaxValue because of an overflow in Calcites type inference logic
+  // we cannot use Int.MaxValue because of an overflow in Calcite's type inference logic
   // half should be enough for all use cases
   override def getMaxNumericPrecision: Int = Int.MaxValue / 2
 
@@ -40,7 +40,7 @@ class FlinkTypeSystem extends RelDataTypeSystemImpl {
     case SqlTypeName.VARCHAR =>
       Int.MaxValue
 
-    // we currenty support only timestamps with milliseconds precision
+    // we currently support only timestamps with milliseconds precision
     case SqlTypeName.TIMESTAMP =>
       3
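
The overflow being avoided is plain two's-complement wrap-around: inference logic that adds two precisions together misbehaves at Int.MaxValue, while half of it stays safe (a quick illustration, not Calcite's actual inference code):

    println(Int.MaxValue + Int.MaxValue)          // -2: wrapped around
    println(Int.MaxValue / 2 + Int.MaxValue / 2)  // 2147483646: still positive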
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala
index df7ef57..3a43544 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CodeGenerator.scala
@@ -244,7 +244,7 @@ abstract class CodeGenerator(
     * @param returnType conversion target type. Inputs and output must have the same arity.
     * @param resultFieldNames result field names necessary for a mapping to POJO fields.
     * @param rowtimeExpression an expression to extract the value of a rowtime field from
-    *                          the input data. Required if the field indicies include a rowtime
+    *                          the input data. Required if the field indices include a rowtime
     *                          marker.
     * @return instance of GeneratedExpression
     */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala
index e41b876..4c01c1c 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/ScalarFunction.scala
@@ -61,7 +61,7 @@ abstract class ScalarFunction extends UserDefinedFunction {
   /**
     * Returns the result type of the evaluation method with a given signature.
     *
-    * This method needs to be overriden in case Flink's type extraction facilities are not
+    * This method needs to be overridden in case Flink's type extraction facilities are not
     * sufficient to extract the [[TypeInformation]] based on the return type of the evaluation
     * method. Flink's type extraction facilities can handle basic types or
     * simple POJOs but might be wrong for more complex, custom, or composite types.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala
index ff69954..d80ec47 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/TableFunction.scala
@@ -111,7 +111,7 @@ abstract class TableFunction[T] extends UserDefinedFunction {
   /**
     * Returns the result type of the evaluation method with a given signature.
     *
-    * This method needs to be overriden in case Flink's type extraction facilities are not
+    * This method needs to be overridden in case Flink's type extraction facilities are not
     * sufficient to extract the [[TypeInformation]] based on the return type of the evaluation
     * method. Flink's type extraction facilities can handle basic types or
     * simple POJOs but might be wrong for more complex, custom, or composite types.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala
index 4b1e921..241e511 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/AggSqlFunction.scala
@@ -58,7 +58,7 @@ class AggSqlFunction(
     createReturnTypeInference(returnType, typeFactory),
     createOperandTypeInference(aggregateFunction, typeFactory),
     createOperandTypeChecker(aggregateFunction),
-    // Do not need to provide a calcite aggregateFunction here. Flink aggregateion function
+    // Do not need to provide a calcite aggregateFunction here. Flink aggregation function
     // will be generated when translating the calcite relnode to flink runtime execution plan
     null,
     false,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala
index 4a34732..c2eabae 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala
@@ -632,7 +632,7 @@ object UserDefinedFunctionUtils {
   /**
     * Creates a [[LogicalTableFunctionCall]] by parsing a String expression.
     *
-    * @param tableEnv The table environmenent to lookup the function.
+    * @param tableEnv The table environment to lookup the function.
     * @param udtf a String expression of a TableFunctionCall, such as "split(c)"
     * @return A LogicalTableFunctionCall.
     */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
index c53f090..4331457 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
@@ -143,7 +143,7 @@ trait CommonCorrelate {
          |""".stripMargin
     } else {
 
-      // adjust indicies of InputRefs to adhere to schema expected by generator
+      // adjust indices of InputRefs to adhere to schema expected by generator
       val changeInputRefIndexShuttle = new RexShuttle {
         override def visitInputRef(inputRef: RexInputRef): RexNode = {
           new RexInputRef(inputSchema.arity + inputRef.getIndex, inputRef.getType)

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala
index e461c57..56bf8ea 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala
@@ -443,11 +443,11 @@ class DataSetJoin(
       s"join: (${joinSelectionToString(joinRowType)})"
   }
 
-  /** Returns an array of indicies with some indicies being a prefix. */
+  /** Returns an array of indices with some indices being a prefix. */
   private def getFullIndiciesWithPrefix(keys: Array[Int], numFields: Int): Array[Int] = {
-    // get indicies of all fields which are not keys
+    // get indices of all fields which are not keys
     val nonKeys = (0 until numFields).filter(!keys.contains(_))
-    // return all field indicies prefixed by keys
+    // return all field indices prefixed by keys
     keys ++ nonKeys
   }
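
A quick worked example of the prefixing above (plain Scala, outside any Flink class):

    val keys = Array(2, 0)
    val numFields = 4
    val nonKeys = (0 until numFields).filter(!keys.contains(_))  // Vector(1, 3)
    println((keys ++ nonKeys).mkString(","))                     // 2,0,1,3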
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala
index 173b7d3..b1c9222 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/retractionTraits.scala
@@ -98,13 +98,14 @@ object AccMode extends Enumeration {
     * Changes are encoded as follows:
     * - insert: (true, NewRow)
     * - update: (true, NewRow) // the Row includes the full unique key to identify the row to update
-    * - delete: (false, OldRow) // the Row includes the full unique key to idenify the row to delete
+    * - delete: (false, OldRow) // the Row includes the full unique key to
+    * identify the row to delete
     *
     */
   val Acc = Value
 
   /**
-    * * An operator in [[AccRetract]] mode emits change messages as
+    * An operator in [[AccRetract]] mode emits change messages as
     * [[org.apache.flink.table.runtime.types.CRow]] which encode a pair of (Boolean, Row).
     *
     * Changes are encoded as follows:
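
A minimal sketch of the (Boolean, Row) encoding this javadoc describes, with a simplified stand-in for CRow (the Change type below is hypothetical):

    // true = accumulate (insert/update), false = retract (delete).
    case class Change(accumulate: Boolean, row: String)

    val insert = Change(accumulate = true,  row = "user=1,cnt=1")
    val delete = Change(accumulate = false, row = "user=1,cnt=1")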

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala
index 7e61fdf..84f1d11 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/schema/InlineTable.scala
@@ -46,9 +46,9 @@ abstract class InlineTable[T](
   // check uniqueness of field names
   if (fieldNames.length != fieldNames.toSet.size) {
     val duplicateFields = fieldNames
-      // count occurences of field names
+      // count occurrences of field names
       .groupBy(identity).mapValues(_.length)
-      // filter for occurences > 1 and map to field name
+      // filter for occurrences > 1 and map to field name
       .filter(g => g._2 > 1).keys
 
     throw new TableException(

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala
index a908f49..3156615 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateTimeWindowFunction.scala
@@ -27,7 +27,7 @@ import org.apache.flink.table.runtime.types.CRow
 import org.apache.flink.util.Collector
 
 /**
-  * Computes the final aggregate value from incrementally computed aggreagtes.
+  * Computes the final aggregate value from incrementally computed aggregates.
   *
   * @param numGroupingKey the number of grouping keys
   * @param numAggregates the number of aggregates

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala
index c9fa0c9..4ec6407 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/IncrementalAggregateWindowFunction.scala
@@ -27,7 +27,7 @@ import org.apache.flink.types.Row
 import org.apache.flink.util.Collector
 
 /**
-  * Computes the final aggregate value from incrementally computed aggreagtes.
+  * Computes the final aggregate value from incrementally computed aggregates.
   *
   * @param numGroupingKey The number of grouping keys.
   * @param numAggregates The number of aggregates.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala
index 0d69355..f40feb1 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala
@@ -30,7 +30,7 @@ import org.apache.flink.types.Row
 import org.apache.flink.util.{Collector, Preconditions}
 
 /**
- * ProcessFunction to sort on event-time and possibly addtional secondary sort attributes.
+ * ProcessFunction to sort on event-time and possibly additional secondary sort attributes.
  *
   * @param inputRowType The data type of the input data.
   * @param rowtimeIdx The index of the rowtime field.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala
index 27d307b..181c768 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeUnboundedOver.scala
@@ -192,7 +192,7 @@ abstract class RowTimeUnboundedOver(
         val curTimestamp = sortedTimestamps.removeFirst()
         val curRowList = rowMapState.get(curTimestamp)
 
-        // process the same timestamp datas, the mechanism is different according ROWS or RANGE
+        // process the same timestamp data; the mechanism is different according to ROWS or RANGE
         processElementsWithSameTimestamp(curRowList, lastAccumulator, out)
 
         rowMapState.remove(curTimestamp)
@@ -234,7 +234,7 @@ abstract class RowTimeUnboundedOver(
   }
 
   /**
-   * Process the same timestamp datas, the mechanism is different between
+   * Process the same timestamp data; the mechanism is different between
    * rows and range window.
    */
   def processElementsWithSameTimestamp(
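
The surrounding loop drains timestamps in ascending order and hands all rows with the same timestamp to this method; a plain-Scala sketch of that control flow (state types here are simplified stand-ins):

    import scala.collection.mutable

    val rowMapState = mutable.Map(1L -> List("r1", "r2"), 2L -> List("r3"))
    val sortedTimestamps = mutable.Queue(1L, 2L)

    while (sortedTimestamps.nonEmpty) {
      val ts = sortedTimestamps.dequeue()
      // stand-in for processElementsWithSameTimestamp(curRowList, acc, out)
      rowMapState(ts).foreach(r => println(s"emit $r at $ts"))
      rowMapState.remove(ts)
    }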

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala
index 7006476..18e26df 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/WindowJoinUtil.scala
@@ -420,7 +420,7 @@ object WindowJoinUtil {
     * Generates a JoinFunction that applies additional join predicates and projects the result.
     *
     * @param  config          table env config
-    * @param  joinType        join type to determain whether input can be null
+    * @param  joinType        join type to determine whether input can be null
     * @param  leftType        left stream type
     * @param  rightType       right stream type
     * @param  returnType      return type

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala
index 659a6cd..ba076b4 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala
@@ -153,7 +153,6 @@ class CsvTableSource private (
   override def projectFields(fields: Array[Int]): CsvTableSource = {
 
     val selectedFields = if (fields.isEmpty) Array(0) else fields
-//    val selectedFiels = fields
 
     new CsvTableSource(
       path,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala
index 6895419..5cb7e90 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/TableSourceUtil.scala
@@ -243,7 +243,7 @@ object TableSourceUtil {
     * Returns the Calcite schema of a [[TableSource]].
     *
     * @param tableSource The [[TableSource]] for which the Calcite schema is generated.
-    * @param selectedFields The indicies of all selected fields. None, if all fields are selected.
+    * @param selectedFields The indices of all selected fields. None, if all fields are selected.
     * @param streaming Flag to determine whether the schema of a stream or batch table is created.
     * @param typeFactory The type factory to create the schema.
     * @return The Calcite schema for the selected fields of the given [[TableSource]].

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala
index abe3ae2..df84a84 100644
--- a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala
+++ b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/RowTypeTest.scala
@@ -49,7 +49,7 @@ class RowTypeTest extends RowTypeTestBase {
         "Map('foo', 'bar'), row(1, true))",
       "ROW(DATE '1985-04-11', CAST(0.1 AS DECIMAL), ARRAY[1, 2, 3], " +
         "MAP['foo', 'bar'], row(1, true))",
-      "1985-04-11,0.1,[1, 2, 3],{foo=bar},1,true") // string faltten
+      "1985-04-11,0.1,[1, 2, 3],{foo=bar},1,true") // string flatten
 
     testAllApis(
       row(1 + 1, 2 * 3, Null(Types.STRING)),

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java
----------------------------------------------------------------------
diff --git a/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java b/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java
index 368d62d..af72c96 100755
--- a/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java
+++ b/flink-mesos/src/main/java/org/apache/flink/mesos/entrypoint/MesosEntrypointUtils.java
@@ -42,7 +42,7 @@ import scala.concurrent.duration.Duration;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
- * Utils for Mesos entrpoints.
+ * Utils for Mesos entry points.
  */
 public class MesosEntrypointUtils {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java
----------------------------------------------------------------------
diff --git a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java
index e4f4cf7..63f371d 100644
--- a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java
+++ b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/services/AbstractMesosServices.java
@@ -27,7 +27,7 @@ import akka.actor.ActorSystem;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
- * An abrstact implementation of {@link MesosServices}.
+ * An abstract implementation of {@link MesosServices}.
  */
 public abstract class AbstractMesosServices implements MesosServices {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java
----------------------------------------------------------------------
diff --git a/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java b/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java
index 1cc7d38..461f1dc 100644
--- a/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java
+++ b/flink-metrics/flink-metrics-jmx/src/main/java/org/apache/flink/metrics/jmx/JMXReporter.java
@@ -85,7 +85,7 @@ public class JMXReporter implements MetricReporter {
 	/** The names under which the registered metrics have been added to the MBeanServer. */
 	private final Map<Metric, ObjectName> registeredMetrics;
 
-	/** The server to which JMX clients connect to. ALlows for better control over port usage. */
+	/** The server to which JMX clients connect. Allows for better control over port usage. */
 	private JMXServer jmxServer;
 
 	public JMXReporter() {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java
index b3b7cf9..0136e20 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/traversals/PlanFinalizer.java
@@ -209,7 +209,7 @@ public class PlanFinalizer implements Visitor<PlanNode> {
 			}
 		}
 
-		// pass the visitor to the iteraton's step function
+		// pass the visitor to the iteration's step function
 		if (visitable instanceof IterationPlanNode) {
 			// push the iteration node onto the stack
 			final IterationPlanNode iterNode = (IterationPlanNode) visitable;

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java b/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java
index fefc627..7248c10 100644
--- a/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java
+++ b/flink-optimizer/src/test/java/org/apache/flink/optimizer/UnionPropertyPropagationTest.java
@@ -136,7 +136,7 @@ public class UnionPropertyPropagationTest extends CompilerTestBase {
 				}
 				
 				/* Test on the union input connections
-				 * Must be NUM_INPUTS input connections, all FlatMapOperators with a own partitioning strategy(propably hash)
+				 * Must be NUM_INPUTS input connections, all FlatMapOperators with their own partitioning strategy (probably hash)
 				 */
 				if (visitable instanceof NAryUnionPlanNode) {
 					int numberInputs = 0;

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java
index 3a976e4..98dc20a 100644
--- a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java
+++ b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/RedirectHandlerTest.java
@@ -59,7 +59,7 @@ public class RedirectHandlerTest extends TestLogger {
 	 * Tests the behaviour of the RedirectHandler under the following conditions.
 	 *
 	 * <p>1. No local address known --> service unavailable
-	 * 2. Local address knwon but no gateway resolved --> service unavailable
+	 * 2. Local address known but no gateway resolved --> service unavailable
 	 * 3. Remote leader gateway --> redirection
 	 * 4. Local leader gateway
 	 * @throws Exception

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java
index 066de74..23f0f53 100644
--- a/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java
+++ b/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/history/HistoryServerStaticFileServerHandlerTest.java
@@ -55,7 +55,7 @@ public class HistoryServerStaticFileServerHandlerTest {
 
 		int port = webUI.getServerPort();
 		try {
-			// verify that 404 message is returned when requesting a non-existant file
+			// verify that 404 message is returned when requesting a non-existent file
 			String notFound404 = HistoryServerTest.getFromHTTP("http://localhost:" + port + "/hello");
 			Assert.assertTrue(notFound404.contains("404 Not Found"));
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js
----------------------------------------------------------------------
diff --git a/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js b/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js
index aadca55..be02f32 100644
--- a/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js
+++ b/flink-runtime-web/web-dashboard/vendor-local/d3-timeline.js
@@ -69,7 +69,7 @@
         .attr("clip-path", "url(#" + prefix + "-gclip" + ")")
 
       // check if the user wants relative time
-      // if so, substract the first timestamp from each subsequent timestamps
+      // if so, subtract the first timestamp from each subsequent timestamps
       if(timeIsRelative){
         g.each(function (d, i) {
           d.forEach(function (datum, index) {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java b/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java
index 8078c26..e0279b3 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/akka/FlinkUntypedActor.java
@@ -49,7 +49,7 @@ public abstract class FlinkUntypedActor extends UntypedActor {
 	 * processing time of the incoming message if the logging level is set to debug. After logging
 	 * the handleLeaderSessionID method is called.
 	 *
-	 * <p>Important: This method cannot be overriden. The actor specific message handling logic is
+	 * <p>Important: This method cannot be overridden. The actor specific message handling logic is
 	 * implemented by the method handleMessage.
 	 *
 	 * @param message Incoming message
@@ -124,7 +124,7 @@ public abstract class FlinkUntypedActor extends UntypedActor {
 	protected abstract void handleMessage(Object message) throws Exception;
 
 	/**
-	 * Returns the current leader session ID associcated with this actor.
+	 * Returns the current leader session ID associated with this actor.
 	 * @return
 	 */
 	protected abstract UUID getLeaderSessionID();
@@ -134,10 +134,10 @@ public abstract class FlinkUntypedActor extends UntypedActor {
 	 * a leader session ID (indicated by {@link RequiresLeaderSessionID}) in a
 	 * {@link LeaderSessionMessage} with the actor's leader session ID.
 	 *
-	 * <p>This method can be overriden to implement a different decoration behavior.
+	 * <p>This method can be overridden to implement a different decoration behavior.
 	 *
 	 * @param message Message to be decorated
-	 * @return The deocrated message
+	 * @return The decorated message
 	 */
 	protected Object decorateMessage(Object message) {
 		if (message instanceof RequiresLeaderSessionID) {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
index ae59f59..83cb18e 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
@@ -261,7 +261,7 @@ public class PermanentBlobCache extends AbstractBlobCache implements PermanentBl
 
 					/*
 					 * NOTE: normally it is not required to acquire the write lock to delete the job's
-					 *       storage directory since there should be noone accessing it with the ref
+					 *       storage directory since there should be no one accessing it with the ref
 					 *       counter being 0 - acquire it just in case, to always be on the safe side
 					 */
 						readWriteLock.writeLock().lock();
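
The "acquire it just in case" note above boils down to this pattern: even when a zero ref count suggests no readers, deletion still runs under the write lock (a schematic sketch, not the cache's real fields):

    import java.util.concurrent.locks.ReentrantReadWriteLock

    val readWriteLock = new ReentrantReadWriteLock()

    def deleteJobStorage(refCount: Int): Unit = {
      readWriteLock.writeLock().lock()   // safe even if refCount == 0 was stale
      try {
        if (refCount == 0) println("deleting job storage directory")
      } finally {
        readWriteLock.writeLock().unlock()
      }
    }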

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
index 9a4456e..824563f 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
@@ -946,7 +946,7 @@ public class CheckpointCoordinator {
 	 * Fails all pending checkpoints which have not been acknowledged by the given execution
 	 * attempt id.
 	 *
-	 * @param executionAttemptId for which to discard unaknowledged pending checkpoints
+	 * @param executionAttemptId for which to discard unacknowledged pending checkpoints
 	 * @param cause of the failure
 	 */
 	public void failUnacknowledgedPendingCheckpointsFor(ExecutionAttemptID executionAttemptId, Throwable cause) {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java
index 0f32250..59b404a 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/PendingCheckpointStats.java
@@ -56,7 +56,7 @@ public class PendingCheckpointStats extends AbstractCheckpointStats {
 	/** Current buffered bytes during alignment over all collected subtasks. */
 	private volatile long currentAlignmentBuffered;
 
-	/** Stats of the latest acknowleged subtask. */
+	/** Stats of the latest acknowledged subtask. */
 	private volatile SubtaskStateStats latestAcknowledgedSubtask;
 
 	/**

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java b/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java
index eb045c0..5ce0de8 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobListeningContext.java
@@ -114,7 +114,7 @@ public final class JobListeningContext {
 	}
 
 	/**
-	 * @return The Job Client actor which communicats with the JobManager.
+	 * @return The Job Client actor which communicates with the JobManager.
 	 */
 	public ActorRef getJobClientActor() {
 		return jobClientActor;

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java
index d1efd77..ecfbc60 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/BootstrapTools.java
@@ -353,7 +353,7 @@ public class BootstrapTools {
 	/**
 	 * Generates the shell command to start a task manager.
 	 * @param flinkConfig The Flink configuration.
-	 * @param tmParams Paramaters for the task manager.
+	 * @param tmParams Parameters for the task manager.
 	 * @param configDirectory The configuration directory for the flink-conf.yaml
 	 * @param logDirectory The log directory.
 	 * @param hasLogback Uses logback?

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java
index f9c39c1..e20dd9b 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/FlinkResourceManager.java
@@ -738,7 +738,7 @@ public abstract class FlinkResourceManager<WorkerType extends ResourceIDRetrieva
 	 * Starts the resource manager actors.
 	 * @param configuration The configuration for the resource manager
 	 * @param actorSystem The actor system to start the resource manager in
-	 * @param leaderRetriever The leader retriever service to intialize the resource manager
+	 * @param leaderRetriever The leader retriever service to initialize the resource manager
 	 * @param resourceManagerClass The class of the ResourceManager to be started
 	 * @return ActorRef of the resource manager
 	 */
@@ -757,7 +757,7 @@ public abstract class FlinkResourceManager<WorkerType extends ResourceIDRetrieva
 	 * Starts the resource manager actors.
 	 * @param configuration The configuration for the resource manager
 	 * @param actorSystem The actor system to start the resource manager in
-	 * @param leaderRetriever The leader retriever service to intialize the resource manager
+	 * @param leaderRetriever The leader retriever service to initialize the resource manager
 	 * @param resourceManagerClass The class of the ResourceManager to be started
 	 * @param resourceManagerActorName The name of the resource manager actor.
 	 * @return ActorRef of the resource manager

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java
index bd79218..c45cd02 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/HadoopConfOverlay.java
@@ -40,7 +40,7 @@ import java.io.IOException;
  * The following environment variables are set in the container:
  *  - HADOOP_CONF_DIR
  *
- * The folloowing Flink configuration entries are updated:
+ * The following Flink configuration entries are updated:
  *  - fs.hdfs.hadoopconf
  */
 public class HadoopConfOverlay implements ContainerOverlay {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java
index 271b32d..c8c87d4 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/overlays/KeytabOverlay.java
@@ -33,7 +33,7 @@ import java.io.IOException;
 /**
  * Overlays cluster-level Kerberos credentials (i.e. keytab) into a container.
  *
- * The folloowing Flink configuration entries are updated:
+ * The following Flink configuration entries are updated:
  *  - security.kerberos.login.keytab
  */
 public class KeytabOverlay extends AbstractContainerOverlay {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java
index 3dec3f3..6eb9af4 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/types/ResourceProfile.java
@@ -76,7 +76,7 @@ public class ResourceProfile implements Serializable, Comparable<ResourceProfile
 	 * @param heapMemoryInMB The size of the heap memory, in megabytes.
 	 * @param directMemoryInMB The size of the direct memory, in megabytes.
 	 * @param nativeMemoryInMB The size of the native memory, in megabytes.
-	 * @param extendedResources The extendiable resources such as GPU and FPGA
+	 * @param extendedResources The extended resources such as GPU and FPGA
 	 */
 	public ResourceProfile(
 			double cpuCores,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/AccessExecutionGraph.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/AccessExecutionGraph.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/AccessExecutionGraph.java
index ebc0768..c38f818 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/AccessExecutionGraph.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/AccessExecutionGraph.java
@@ -50,7 +50,7 @@ public interface AccessExecutionGraph {
 	JobID getJobID();
 
 	/**
-	 * Returns the job name for thie execution graph.
+	 * Returns the job name for the execution graph.
 	 *
 	 * @return job name for this execution graph
 	 */
@@ -90,7 +90,7 @@ public interface AccessExecutionGraph {
 	/**
 	 * Returns an iterable containing all job vertices for this execution graph in the order they were created.
 	 *
-	 * @return iterable containing all job vertices for this execution graph in the order they were creater
+	 * @return iterable containing all job vertices for this execution graph in the order they were created
 	 */
 	Iterable<? extends AccessExecutionJobVertex> getVerticesTopologically();
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
index cc35060..367d02c 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
@@ -782,7 +782,7 @@ public class Execution implements AccessExecution, Archiveable<ArchivedExecution
 	 * @param sampleId of the stack trace sample
 	 * @param numSamples the sample should contain
 	 * @param delayBetweenSamples to wait
-	 * @param maxStrackTraceDepth of the samples
+	 * @param maxStackTraceDepth of the samples
 	 * @param timeout until the request times out
 	 * @return Future stack trace sample response
 	 */
@@ -790,7 +790,7 @@ public class Execution implements AccessExecution, Archiveable<ArchivedExecution
 			int sampleId,
 			int numSamples,
 			Time delayBetweenSamples,
-			int maxStrackTraceDepth,
+			int maxStackTraceDepth,
 			Time timeout) {
 
 		final LogicalSlot slot = assignedResource;
@@ -803,7 +803,7 @@ public class Execution implements AccessExecution, Archiveable<ArchivedExecution
 				sampleId,
 				numSamples,
 				delayBetweenSamples,
-				maxStrackTraceDepth,
+				maxStackTraceDepth,
 				timeout);
 		} else {
 			return FutureUtils.completedExceptionally(new Exception("The execution has no slot assigned."));

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
index cb4f2c8..ef46086 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
@@ -447,7 +447,7 @@ public class ExecutionVertex implements AccessExecutionVertex, Archiveable<Archi
 	 *     <li>Repeated executions of stateful tasks try to co-locate the execution with its state.
 	 * </ul>
 	 * 
-	 * @return The preferred excution locations for the execution attempt.
+	 * @return The preferred execution locations for the execution attempt.
 	 * 
 	 * @see #getPreferredLocationsBasedOnState()
 	 * @see #getPreferredLocationsBasedOnInputs() 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java b/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java
index cb79a65..d29dcec 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/FsNegativeRunningJobsRegistry.java
@@ -31,7 +31,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
 * This {@link RunningJobsRegistry} tracks the status of jobs via marker files,
- * marking running jobs viarunning marker files, marking finished jobs via finished marker files.
+ * marking running jobs via running marker files, marking finished jobs via finished marker files.
  * 
  * <p>The general contract is the following:
  * <ul>

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java
index bde358c..a36fc57 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/AbstractIterativeTask.java
@@ -97,7 +97,7 @@ public abstract class AbstractIterativeTask<S extends Function, OT> extends Batc
 		// check if the driver is resettable
 		if (this.driver instanceof ResettableDriver) {
 			final ResettableDriver<?, ?> resDriver = (ResettableDriver<?, ?>) this.driver;
-			// make sure that the according inputs are not reseted
+			// make sure that the corresponding inputs are not reset
 			for (int i = 0; i < resDriver.getNumberOfInputs(); i++) {
 				if (resDriver.isInputResettable(i)) {
 					excludeFromReset(i);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java b/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java
index 4f52895..1fe95eb 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobVertex.java
@@ -68,7 +68,7 @@ public class JobVertex implements java.io.Serializable {
 	/** Number of subtasks to split this task into at runtime.*/
 	private int parallelism = ExecutionConfig.PARALLELISM_DEFAULT;
 
-	/** Maximum number of subtasks to split this taks into a runtime. */
+	/** Maximum number of subtasks to split this task into at runtime. */
 	private int maxParallelism = -1;
 
 	/** The minimum resource of the vertex */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java
index 65bf2a1..3878167 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/SlotContext.java
@@ -32,7 +32,7 @@ public interface SlotContext {
 	 * Gets the id under which the slot has been allocated on the TaskManager. This id uniquely identifies the
 	 * physical slot.
 	 *
-	 * @return The id under whic teh slot has been allocated on the TaskManager
+	 * @return The id under which the slot has been allocated on the TaskManager
 	 */
 	AllocationID getAllocationId();
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java
index d3b51f7..7a627b4 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/slotpool/SlotPoolGateway.java
@@ -108,7 +108,7 @@ public interface SlotPoolGateway extends AllocatedSlotActions, RpcGateway {
 	 * individually accepted or rejected by returning the collection of accepted
 	 * slot offers.
 	 *
-	 * @param taskManagerLocation from which the slot offeres originate
+	 * @param taskManagerLocation from which the slot offers originate
 	 * @param taskManagerGateway to talk to the slot offerer
 	 * @param offers slot offers which are offered to the {@link SlotPool}
 	 * @return A collection of accepted slot offers (future). The remaining slot offers are

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
index 87b0a76..be81877 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
@@ -131,7 +131,7 @@ public class BatchTask<S extends Function, OT> extends AbstractInvokable impleme
 	protected int[] iterativeInputs;
 	
 	/**
-	 * The indices of the iterative broadcast inputs. Empty, if non of the inputs is iteratve. 
+	 * The indices of the iterative broadcast inputs. Empty, if none of the inputs is iterative.
 	 */
 	protected int[] iterativeBroadcastInputs;
 	
@@ -184,13 +184,13 @@ public class BatchTask<S extends Function, OT> extends AbstractInvokable impleme
 
 	/**
 	 * Certain inputs may be excluded from resetting. For example, the initial partial solution
-	 * in an iteration head must not be reseted (it is read through the back channel), when all
-	 * others are reseted.
+	 * in an iteration head must not be reset (it is read through the back channel), when all
+	 * others are reset.
 	 */
 	private boolean[] excludeFromReset;
 
 	/**
-	 * Flag indicating for each input whether it is cached and can be reseted.
+	 * Flag indicating for each input whether it is cached and can be reset.
 	 */
 	private boolean[] inputIsCached;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java
index c3bd492..7ed86bf 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/CoGroupDriver.java
@@ -122,14 +122,14 @@ public class CoGroupDriver<IT1, IT2, OT> implements Driver<CoGroupFunction<IT1,
 		}
 
 		if (objectReuseEnabled) {
-			// create CoGropuTaskIterator according to provided local strategy.
+			// create CoGroupTaskIterator according to provided local strategy.
 			this.coGroupIterator = new ReusingSortMergeCoGroupIterator<IT1, IT2>(
 					in1, in2,
 					serializer1, groupComparator1,
 					serializer2, groupComparator2,
 					pairComparatorFactory.createComparator12(groupComparator1, groupComparator2));
 		} else {
-			// create CoGropuTaskIterator according to provided local strategy.
+			// create CoGroupTaskIterator according to provided local strategy.
 			this.coGroupIterator = new NonReusingSortMergeCoGroupIterator<IT1, IT2>(
 					in1, in2,
 					serializer1, groupComparator1,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java
index f46fcfb..bfc9aec 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java
@@ -628,7 +628,7 @@ public class InPlaceMutableHashTable<T> extends AbstractMutableHashTable<T> {
 		}
 
 		/**
-		 * Overwrites a record at the sepcified position. The record is read from a DataInputView  (this will be the staging area).
+		 * Overwrites a record at the specified position. The record is read from a DataInputView (this will be the staging area).
 		 * WARNING: The record must not be larger than the original record.
 		 * @param pointer Points to the position to overwrite.
 		 * @param input The DataInputView to read the record from


[19/19] flink git commit: [hotfix] [core] Add a factory method to create Path from local file

Posted by se...@apache.org.
[hotfix] [core] Add a factory method to create Path from local file

This makes it easier for users and contributors to figure out how
to create local file paths in a way that works across operating systems.
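
For illustration, a minimal usage sketch of the new factory method (the file name here is made up):

    File file = new File("data/input.txt");      // hypothetical local file
    Path path = Path.fromLocalFile(file);        // equivalent to: new Path(file.toURI())
    // File.toURI() normalizes platform specifics such as Windows drive letters
    // and backslash separators into a well-formed "file:/..." URI.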


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/7034e9cf
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/7034e9cf
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/7034e9cf

Branch: refs/heads/master
Commit: 7034e9cfcb051ef90c5bf0960bfb50a79b3723f0
Parents: a49f037
Author: Stephan Ewen <se...@apache.org>
Authored: Wed Dec 13 17:06:37 2017 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:07 2018 +0100

----------------------------------------------------------------------
 .../java/org/apache/flink/core/fs/Path.java     | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/7034e9cf/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
index b463fd9..1334acc 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
@@ -28,6 +28,7 @@ import org.apache.flink.core.memory.DataInputView;
 import org.apache.flink.core.memory.DataOutputView;
 import org.apache.flink.util.StringUtils;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.Serializable;
 import java.net.URI;
@@ -528,4 +529,23 @@ public class Path implements IOReadableWritable, Serializable {
 				&& ((path.charAt(start) >= 'A' && path.charAt(start) <= 'Z') || (path.charAt(start) >= 'a' && path
 				.charAt(start) <= 'z'));
 	}
+
+	// ------------------------------------------------------------------------
+	//  Utilities
+	// ------------------------------------------------------------------------
+
+	/**
+	 * Creates a path for the given local file.
+	 *
+	 * <p>This method is useful to make sure the path creation for local files works
+	 * seamlessly across different operating systems. Especially Windows has slightly
+	 * different rules for slashes between schema and a local file path, making it
+	 * sometimes tricky to produce cross-platform URIs for local files.
+	 *
+	 * @param file The file that the path should represent.
+	 * @return A path representing the local file URI of the given file.
+	 */
+	public static Path fromLocalFile(File file) {
+		return new Path(file.toURI());
+	}
 }


[08/19] flink git commit: [hotfix] [checkpoints] Remove never used method 'close()' on CheckpointStreamFactory

Posted by se...@apache.org.
[hotfix] [checkpoints] Remove never used method 'close()' on CheckpointStreamFactory

The fact that the method was never called (and never implemented) strongly suggests
that it should be removed; otherwise someone might eventually end up implementing
it for a new state backend and wonder why it is never called.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6360875f
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6360875f
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6360875f

Branch: refs/heads/master
Commit: 6360875f12a102612bbb6c79bed807712285e116
Parents: b82f59f
Author: Stephan Ewen <se...@apache.org>
Authored: Thu Oct 26 20:54:55 2017 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 .../apache/flink/runtime/state/CheckpointStreamFactory.java  | 8 --------
 .../runtime/state/filesystem/FsCheckpointStreamFactory.java  | 3 ---
 .../runtime/state/memory/MemCheckpointStreamFactory.java     | 3 ---
 .../flink/runtime/util/BlockerCheckpointStreamFactory.java   | 5 -----
 .../runtime/tasks/TaskCheckpointingBehaviourTest.java        | 3 ---
 5 files changed, 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/6360875f/flink-runtime/src/main/java/org/apache/flink/runtime/state/CheckpointStreamFactory.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/CheckpointStreamFactory.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/CheckpointStreamFactory.java
index 199a856..73113ec 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/CheckpointStreamFactory.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/CheckpointStreamFactory.java
@@ -40,14 +40,6 @@ public interface CheckpointStreamFactory {
 			long timestamp) throws Exception;
 
 	/**
-	 * Closes the stream factory, releasing all internal resources, but does not delete any
-	 * persistent checkpoint data.
-	 *
-	 * @throws Exception Exceptions can be forwarded and will be logged by the system
-	 */
-	void close() throws Exception;
-
-	/**
 	 * A dedicated output stream that produces a {@link StreamStateHandle} when closed.
 	 *
 	 * <p>Note: This is an abstract class and not an interface because {@link OutputStream}

http://git-wip-us.apache.org/repos/asf/flink/blob/6360875f/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactory.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactory.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactory.java
index a824651..8f84a38 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactory.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsCheckpointStreamFactory.java
@@ -106,9 +106,6 @@ public class FsCheckpointStreamFactory implements CheckpointStreamFactory {
 	}
 
 	@Override
-	public void close() throws Exception {}
-
-	@Override
 	public FsCheckpointStateOutputStream createCheckpointStateOutputStream(long checkpointID, long timestamp) throws Exception {
 		checkFileSystemInitialized();
 

http://git-wip-us.apache.org/repos/asf/flink/blob/6360875f/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/MemCheckpointStreamFactory.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/MemCheckpointStreamFactory.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/MemCheckpointStreamFactory.java
index 3920ce8..602e9d1 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/MemCheckpointStreamFactory.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/MemCheckpointStreamFactory.java
@@ -45,9 +45,6 @@ public class MemCheckpointStreamFactory implements CheckpointStreamFactory {
 	}
 
 	@Override
-	public void close() throws Exception {}
-
-	@Override
 	public CheckpointStateOutputStream createCheckpointStateOutputStream(
 			long checkpointID, long timestamp) throws Exception
 	{

http://git-wip-us.apache.org/repos/asf/flink/blob/6360875f/flink-runtime/src/test/java/org/apache/flink/runtime/util/BlockerCheckpointStreamFactory.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/util/BlockerCheckpointStreamFactory.java b/flink-runtime/src/test/java/org/apache/flink/runtime/util/BlockerCheckpointStreamFactory.java
index 2091e00..727dc4f 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/util/BlockerCheckpointStreamFactory.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/util/BlockerCheckpointStreamFactory.java
@@ -146,9 +146,4 @@ public class BlockerCheckpointStreamFactory implements CheckpointStreamFactory {
 
 		return lastCreatedStream;
 	}
-
-	@Override
-	public void close() throws Exception {
-
-	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/6360875f/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TaskCheckpointingBehaviourTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TaskCheckpointingBehaviourTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TaskCheckpointingBehaviourTest.java
index b1127d5..ddb5f9b 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TaskCheckpointingBehaviourTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TaskCheckpointingBehaviourTest.java
@@ -362,9 +362,6 @@ public class TaskCheckpointingBehaviourTest extends TestLogger {
 		public CheckpointStateOutputStream createCheckpointStateOutputStream(long checkpointID, long timestamp) {
 			return new LockingOutputStream();
 		}
-
-		@Override
-		public void close() {}
 	}
 
 	private static final class LockingOutputStream extends CheckpointStateOutputStream {


[18/19] flink git commit: [hotfix] [hdfs] Avoid re-parsing URIs for all Hadoop File System calls.

Posted by se...@apache.org.
[hotfix] [hdfs] Avoid re-parsing URIs for all Hadoop File System calls.

Previously, this converted Flink paths (internally URIs) to strings and
then let the Hadoop Paths parse, validate, and normalize the strings to
URIs again.

Now we simply pass the URIs directly.
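
In code, the change boils down to the following (condensed from the diff below):

    // Before: stringify the Flink path, then let Hadoop re-parse and validate it.
    org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toString());

    // After: hand the already-parsed URI over directly.
    org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toUri());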


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/0ed264b5
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/0ed264b5
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/0ed264b5

Branch: refs/heads/master
Commit: 0ed264b55560ab573f0086313c37f3110e99d49c
Parents: 4319725
Author: Stephan Ewen <se...@apache.org>
Authored: Fri Oct 27 19:25:22 2017 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:07 2018 +0100

----------------------------------------------------------------------
 .../flink/runtime/fs/hdfs/HadoopFileSystem.java | 43 ++++++++++++++------
 1 file changed, 30 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/0ed264b5/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
----------------------------------------------------------------------
diff --git a/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java b/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
index 7bc5a0f..f17d7e1 100644
--- a/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
+++ b/flink-filesystems/flink-hadoop-fs/src/main/java/org/apache/flink/runtime/fs/hdfs/HadoopFileSystem.java
@@ -81,7 +81,7 @@ public class HadoopFileSystem extends FileSystem {
 
 	@Override
 	public FileStatus getFileStatus(final Path f) throws IOException {
-		org.apache.hadoop.fs.FileStatus status = this.fs.getFileStatus(new org.apache.hadoop.fs.Path(f.toString()));
+		org.apache.hadoop.fs.FileStatus status = this.fs.getFileStatus(toHadoopPath(f));
 		return new HadoopFileStatus(status);
 	}
 
@@ -108,42 +108,52 @@ public class HadoopFileSystem extends FileSystem {
 
 	@Override
 	public HadoopDataInputStream open(final Path f, final int bufferSize) throws IOException {
-		final org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(f.toString());
+		final org.apache.hadoop.fs.Path path = toHadoopPath(f);
 		final org.apache.hadoop.fs.FSDataInputStream fdis = this.fs.open(path, bufferSize);
 		return new HadoopDataInputStream(fdis);
 	}
 
 	@Override
 	public HadoopDataInputStream open(final Path f) throws IOException {
-		final org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(f.toString());
+		final org.apache.hadoop.fs.Path path = toHadoopPath(f);
 		final org.apache.hadoop.fs.FSDataInputStream fdis = fs.open(path);
 		return new HadoopDataInputStream(fdis);
 	}
 
 	@Override
 	@SuppressWarnings("deprecation")
-	public HadoopDataOutputStream create(final Path f, final boolean overwrite, final int bufferSize,
-			final short replication, final long blockSize) throws IOException {
+	public HadoopDataOutputStream create(
+			final Path f,
+			final boolean overwrite,
+			final int bufferSize,
+			final short replication,
+			final long blockSize) throws IOException {
+
 		final org.apache.hadoop.fs.FSDataOutputStream fdos = this.fs.create(
-			new org.apache.hadoop.fs.Path(f.toString()), overwrite, bufferSize, replication, blockSize);
+				toHadoopPath(f), overwrite, bufferSize, replication, blockSize);
 		return new HadoopDataOutputStream(fdos);
 	}
 
 	@Override
 	public HadoopDataOutputStream create(final Path f, final WriteMode overwrite) throws IOException {
-		final org.apache.hadoop.fs.FSDataOutputStream fsDataOutputStream = this.fs
-			.create(new org.apache.hadoop.fs.Path(f.toString()), overwrite == WriteMode.OVERWRITE);
+		final org.apache.hadoop.fs.FSDataOutputStream fsDataOutputStream =
+				this.fs.create(toHadoopPath(f), overwrite == WriteMode.OVERWRITE);
 		return new HadoopDataOutputStream(fsDataOutputStream);
 	}
 
 	@Override
 	public boolean delete(final Path f, final boolean recursive) throws IOException {
-		return this.fs.delete(new org.apache.hadoop.fs.Path(f.toString()), recursive);
+		return this.fs.delete(toHadoopPath(f), recursive);
+	}
+
+	@Override
+	public boolean exists(Path f) throws IOException {
+		return this.fs.exists(toHadoopPath(f));
 	}
 
 	@Override
 	public FileStatus[] listStatus(final Path f) throws IOException {
-		final org.apache.hadoop.fs.FileStatus[] hadoopFiles = this.fs.listStatus(new org.apache.hadoop.fs.Path(f.toString()));
+		final org.apache.hadoop.fs.FileStatus[] hadoopFiles = this.fs.listStatus(toHadoopPath(f));
 		final FileStatus[] files = new FileStatus[hadoopFiles.length];
 
 		// Convert types
@@ -156,13 +166,12 @@ public class HadoopFileSystem extends FileSystem {
 
 	@Override
 	public boolean mkdirs(final Path f) throws IOException {
-		return this.fs.mkdirs(new org.apache.hadoop.fs.Path(f.toString()));
+		return this.fs.mkdirs(toHadoopPath(f));
 	}
 
 	@Override
 	public boolean rename(final Path src, final Path dst) throws IOException {
-		return this.fs.rename(new org.apache.hadoop.fs.Path(src.toString()),
-			new org.apache.hadoop.fs.Path(dst.toString()));
+		return this.fs.rename(toHadoopPath(src), toHadoopPath(dst));
 	}
 
 	@SuppressWarnings("deprecation")
@@ -184,6 +193,14 @@ public class HadoopFileSystem extends FileSystem {
 		return fsKind;
 	}
 
+	// ------------------------------------------------------------------------
+	//  Utilities
+	// ------------------------------------------------------------------------
+
+	private static org.apache.hadoop.fs.Path toHadoopPath(Path path) {
+		return new org.apache.hadoop.fs.Path(path.toUri());
+	}
+
 	/**
 	 * Gets the kind of the file system from its scheme.
 	 *


[04/19] flink git commit: [hotfix] [doc] Fix typo in filesystems.md

Posted by se...@apache.org.
[hotfix] [doc] Fix typo in filesystems.md

This closes #5237


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/0ae70baa
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/0ae70baa
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/0ae70baa

Branch: refs/heads/master
Commit: 0ae70baabb70422ac7528deafe19c08d8a984594
Parents: c7c7270
Author: Matrix42 <93...@qq.com>
Authored: Wed Jan 3 23:32:45 2018 +0800
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:05 2018 +0100

----------------------------------------------------------------------
 docs/ops/filesystems.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/0ae70baa/docs/ops/filesystems.md
----------------------------------------------------------------------
diff --git a/docs/ops/filesystems.md b/docs/ops/filesystems.md
index 5b2a1e7..e589ba0 100644
--- a/docs/ops/filesystems.md
+++ b/docs/ops/filesystems.md
@@ -27,7 +27,7 @@ This page provides details on setting up and configuring distributed file system
 ## Flink's File System support
 
 Flink uses file systems both as a source and sink in streaming/batch applications, and as a target for checkpointing.
-These file systens can for example be *Unix/Windows file systems*, *HDFS*, or even object stores like *S3*.
+These file systems can for example be *Unix/Windows file systems*, *HDFS*, or even object stores like *S3*.
 
 The file system used for a specific file is determined by the file URI's scheme. For example `file:///home/user/text.txt` refers to
 a file in the local file system, while `hdfs://namenode:50010/data/user/text.txt` refers to a file in a specific HDFS cluster.


[06/19] flink git commit: [hotfix] [core] Add comments for class loading config options in CoreOptions

Posted by se...@apache.org.
[hotfix] [core] Add comments for class loading config options in CoreOptions


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/bc2efdd7
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/bc2efdd7
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/bc2efdd7

Branch: refs/heads/master
Commit: bc2efdd7dd47bc42ef1a43310ba5f2df60bfc424
Parents: fb29898
Author: Stephan Ewen <se...@apache.org>
Authored: Thu Jan 4 13:03:50 2018 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:05 2018 +0100

----------------------------------------------------------------------
 .../apache/flink/configuration/CoreOptions.java | 43 +++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/bc2efdd7/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java b/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java
index cf10012..f93c4f1 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/CoreOptions.java
@@ -27,13 +27,54 @@ import org.apache.flink.annotation.PublicEvolving;
 public class CoreOptions {
 
 	// ------------------------------------------------------------------------
-	//  process parameters
+	//  Classloading Parameters
 	// ------------------------------------------------------------------------
 
+	/**
+	 * Defines the class resolution strategy when loading classes from user code,
+	 * meaning whether to first check the user code jar ({@code "child-first"}) or
+	 * the application classpath ({@code "parent-first"}).
+	 *
+	 * <p>The default setting is to load classes first from the user code jar,
+	 * which means that user code jars can include and load different dependencies than
+	 * Flink uses (transitively).
+	 *
+	 * <p>Exceptions to the rules are defined via {@link #ALWAYS_PARENT_FIRST_LOADER}.
+	 */
 	public static final ConfigOption<String> CLASSLOADER_RESOLVE_ORDER = ConfigOptions
 		.key("classloader.resolve-order")
 		.defaultValue("child-first");
 
+	/**
+	 * The namespace patterns for classes that are loaded with a preference from the
+	 * parent classloader, meaning the application class path, rather than any user code
+	 * jar file. This option only has an effect when {@link #CLASSLOADER_RESOLVE_ORDER} is
+	 * set to {@code "child-first"}.
+	 *
+	 * <p>It is important that all classes whose objects move between Flink's runtime and
+	 * any user code (including Flink connectors that run as part of the user code) are
+	 * covered by these patterns here. Otherwise it is possible that the Flink runtime
+	 * and the user code load two different copies of a class through the different class
+	 * loaders. That leads to errors like "X cannot be cast to X" exceptions, where both
+	 * class names are equal, or "X cannot be assigned to Y", where X should be a subclass
+	 * of Y.
+	 *
+	 * <p>The following classes are loaded parent-first, to avoid any duplication:
+	 * <ul>
+	 *     <li>All core Java classes (java.*), because they must never be duplicated.</li>
+	 *     <li>All core Scala classes (scala.*). Currently Scala is used in the Flink
+	 *         runtime and in the user code, and some Scala classes cross the boundary,
+	 *         such as the <i>FunctionX</i> classes. That may change if Scala eventually
+	 *         lives purely as part of the user code.</li>
+	 *     <li>All Flink classes (org.apache.flink.*). Note that this means that connectors
+	 *         and formats (flink-avro, etc) are loaded parent-first as well if they are in the
+	 *         core classpath.</li>
+	 *     <li>Java annotations and loggers, defined by the following list:
+	 *         javax.annotation;org.slf4j;org.apache.log4j;org.apache.logging.log4j;ch.qos.logback.
+	 *         This is done for convenience, to avoid duplication of annotations and multiple
+	 *         log bindings.</li>
+	 * </ul>
+	 */
 	public static final ConfigOption<String> ALWAYS_PARENT_FIRST_LOADER = ConfigOptions
 		.key("classloader.parent-first-patterns")
 		.defaultValue("java.;scala.;org.apache.flink.;javax.annotation;org.slf4j;org.apache.log4j;org.apache.logging.log4j;ch.qos.logback");


[09/19] flink git commit: [hotfix] [core] Avoid redundant File path conversion in LocalFileSystem.getFileStatus(Path)

Posted by se...@apache.org.
[hotfix] [core] Avoid redundant File path conversion in LocalFileSystem.getFileStatus(Path)


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b82f59f6
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b82f59f6
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b82f59f6

Branch: refs/heads/master
Commit: b82f59f64326e81cd8d1703c859d13485dff6958
Parents: bc588aa
Author: Stephan Ewen <se...@apache.org>
Authored: Wed Dec 6 15:10:22 2017 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 .../main/java/org/apache/flink/core/fs/local/LocalFileSystem.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/b82f59f6/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
index d16108b..b1eb786 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
@@ -108,7 +108,7 @@ public class LocalFileSystem extends FileSystem {
 	public FileStatus getFileStatus(Path f) throws IOException {
 		final File path = pathToFile(f);
 		if (path.exists()) {
-			return new LocalFileStatus(pathToFile(f), this);
+			return new LocalFileStatus(path, this);
 		}
 		else {
 			throw new FileNotFoundException("File " + f + " does not exist or the user running "


[05/19] flink git commit: [FLINK-8374] [yarn, tests] Whitelist meaningless exception that may occur during Akka shutdown.

Posted by se...@apache.org.
[FLINK-8374] [yarn,tests] Whitelist meaningless exception that may occur during Akka shutdown.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/c7c72704
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/c7c72704
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/c7c72704

Branch: refs/heads/master
Commit: c7c72704ab8827245d08850edf3d9a448d18097f
Parents: bc2efdd
Author: Stephan Ewen <se...@apache.org>
Authored: Fri Jan 5 14:32:28 2018 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:05 2018 +0100

----------------------------------------------------------------------
 .../src/test/java/org/apache/flink/yarn/YarnTestBase.java     | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/c7c72704/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnTestBase.java
----------------------------------------------------------------------
diff --git a/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnTestBase.java b/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnTestBase.java
index 99df3a4..ae39d0a 100644
--- a/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnTestBase.java
+++ b/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnTestBase.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -116,7 +116,10 @@ public abstract class YarnTestBase extends TestLogger {
 		// very specific on purpose
 		"Remote connection to [null] failed with java.net.ConnectException: Connection refused",
 		"Remote connection to [null] failed with java.nio.channels.NotYetConnectedException",
-		"java.io.IOException: Connection reset by peer"
+		"java.io.IOException: Connection reset by peer",
+
+		// this can happen in Akka 2.4 on shutdown.
+		"java.util.concurrent.RejectedExecutionException: Worker has already been shutdown"
 	};
 
 	// Temp directory which is deleted after the unit test.


[15/19] flink git commit: [hotfix] [tests] Remove unnecessary stack trace printing in StreamTaskTest

Posted by se...@apache.org.
[hotfix] [tests] Remove unnecessary stack trace printing in StreamTaskTest


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/bc588aac
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/bc588aac
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/bc588aac

Branch: refs/heads/master
Commit: bc588aac3a6cfe756351d8ce7d86143048f6e608
Parents: 3d0ed12
Author: Stephan Ewen <se...@apache.org>
Authored: Fri Jan 5 14:11:58 2018 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 .../org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java    | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/bc588aac/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
index 5059827..d7a26c3 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java
@@ -358,7 +358,6 @@ public class StreamTaskTest extends TestLogger {
 			streamTask.triggerCheckpoint(checkpointMetaData, CheckpointOptions.forCheckpoint());
 			fail("Expected test exception here.");
 		} catch (Exception e) {
-			e.printStackTrace();
 			assertEquals(testException, e.getCause());
 		}
 


[16/19] flink git commit: [hotfix] [checkpoints] Improve performance of ByteStreamStateHandle

Posted by se...@apache.org.
[hotfix] [checkpoints] Improve performance of ByteStreamStateHandle

The input stream from ByteStreamStateHandle did not override the 'read(byte[], int, int)' method,
meaning that bulk byte reads resulted in many individual byte accesses.

Additionally, this change avoids accessing the data array through an outer class, but instead adds
a reference directly to the input stream class, avoiding one hop per access. That also allows
a more restricted access level on the fields, which may additionally help the jitter in some cases.
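
For context, the bulk read inherited from java.io.InputStream is (roughly) a loop over the single-byte read(); a simplified sketch of that inherited behavior:

    // Roughly what java.io.InputStream does when read(byte[], int, int) is not
    // overridden: one virtual read() call per requested byte.
    public int read(byte[] b, int off, int len) throws IOException {
        int i = 0;
        while (i < len) {
            int c = read();
            if (c == -1) {
                return i == 0 ? -1 : i;
            }
            b[off + i++] = (byte) c;
        }
        return i;
    }

The new override below replaces this loop with a single System.arraycopy().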


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/1d38e0b4
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/1d38e0b4
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/1d38e0b4

Branch: refs/heads/master
Commit: 1d38e0b49a1936fef477a7a2a65abdd815f2d695
Parents: 0ed264b
Author: Stephan Ewen <se...@apache.org>
Authored: Wed Dec 6 14:51:22 2017 +0100
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:07 2018 +0100

----------------------------------------------------------------------
 .../apache/flink/core/fs/FSDataInputStream.java |   2 +-
 .../state/memory/ByteStreamStateHandle.java     |  38 +++-
 .../state/memory/ByteStreamStateHandleTest.java | 177 +++++++++++++++++++
 .../StateInitializationContextImplTest.java     |  14 +-
 4 files changed, 214 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/1d38e0b4/flink-core/src/main/java/org/apache/flink/core/fs/FSDataInputStream.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/FSDataInputStream.java b/flink-core/src/main/java/org/apache/flink/core/fs/FSDataInputStream.java
index fa931c6..5fc1de8 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/FSDataInputStream.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/FSDataInputStream.java
@@ -35,7 +35,7 @@ public abstract class FSDataInputStream extends InputStream {
 
 	/**
 	 * Seek to the given offset from the start of the file. The next read() will be from that location.
-	 * Can't seek past the end of the file.
+	 * Can't seek past the end of the stream.
 	 *
 	 * @param desired
 	 *        the desired offset

http://git-wip-us.apache.org/repos/asf/flink/blob/1d38e0b4/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandle.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandle.java b/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandle.java
index 3a43d4f..7de66ef 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandle.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandle.java
@@ -34,13 +34,13 @@ public class ByteStreamStateHandle implements StreamStateHandle {
 	/**
 	 * The state data.
 	 */
-	protected final byte[] data;
+	private final byte[] data;
 
 	/**
	 * A unique name by which this state handle is identified and compared. Like a filename, all
 	 * {@link ByteStreamStateHandle} with the exact same name must also have the exact same content in data.
 	 */
-	protected final String handleName;
+	private final String handleName;
 
 	/**
 	 * Creates a new ByteStreamStateHandle containing the given data.
@@ -52,7 +52,7 @@ public class ByteStreamStateHandle implements StreamStateHandle {
 
 	@Override
 	public FSDataInputStream openInputStream() throws IOException {
-		return new ByteStateHandleInputStream();
+		return new ByteStateHandleInputStream(data);
 	}
 
 	public byte[] getData() {
@@ -81,7 +81,6 @@ public class ByteStreamStateHandle implements StreamStateHandle {
 			return false;
 		}
 
-
 		ByteStreamStateHandle that = (ByteStreamStateHandle) o;
 		return handleName.equals(that.handleName);
 	}
@@ -102,18 +101,22 @@ public class ByteStreamStateHandle implements StreamStateHandle {
 	/**
 	 * An input stream view on a byte array.
 	 */
-	private final class ByteStateHandleInputStream extends FSDataInputStream {
+	private static final class ByteStateHandleInputStream extends FSDataInputStream {
 
+		private final byte[] data;
 		private int index;
 
-		public ByteStateHandleInputStream() {
-			this.index = 0;
+		public ByteStateHandleInputStream(byte[] data) {
+			this.data = data;
 		}
 
 		@Override
 		public void seek(long desired) throws IOException {
-			Preconditions.checkArgument(desired >= 0 && desired < Integer.MAX_VALUE);
-			index = (int) desired;
+			if (desired >= 0 && desired <= data.length) {
+				index = (int) desired;
+			} else {
+				throw new IOException("position out of bounds");
+			}
 		}
 
 		@Override
@@ -125,5 +128,22 @@ public class ByteStreamStateHandle implements StreamStateHandle {
 		public int read() throws IOException {
 			return index < data.length ? data[index++] & 0xFF : -1;
 		}
+
+		@Override
+		public int read(byte[] b, int off, int len) throws IOException {
+			// note that any bounds checking on "byte[] b" happens anyway in the
+			// System.arraycopy() call below, so we don't add extra checks here
+
+			final int bytesLeft = data.length - index;
+			if (bytesLeft > 0) {
+				final int bytesToCopy = Math.min(len, bytesLeft);
+				System.arraycopy(data, index, b, off, bytesToCopy);
+				index += bytesToCopy;
+				return bytesToCopy;
+			}
+			else {
+				return -1;
+			}
+		}
 	}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/1d38e0b4/flink-runtime/src/test/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandleTest.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandleTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandleTest.java
new file mode 100644
index 0000000..0175dc3
--- /dev/null
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/state/memory/ByteStreamStateHandleTest.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.runtime.state.memory;
+
+import org.apache.flink.core.fs.FSDataInputStream;
+
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * Tests for the {@link ByteStreamStateHandle}.
+ */
+public class ByteStreamStateHandleTest {
+
+	@Test
+	public void testStreamSeekAndPos() throws IOException {
+		final byte[] data = {34, 25, 22, 66, 88, 54};
+		final ByteStreamStateHandle handle = new ByteStreamStateHandle("name", data);
+
+		// read backwards, one byte at a time
+		for (int i = data.length; i >= 0; i--) {
+			FSDataInputStream in = handle.openInputStream();
+			in.seek(i);
+
+			assertEquals(i, (int) in.getPos());
+
+			if (i < data.length) {
+				assertEquals((int) data[i], in.read());
+				assertEquals(i + 1, (int) in.getPos());
+			} else {
+				assertEquals(-1, in.read());
+				assertEquals(i, (int) in.getPos());
+			}
+		}
+
+		// reading past the end makes no difference
+		FSDataInputStream in = handle.openInputStream();
+		in.seek(data.length);
+
+		// read multiple times, should not affect anything
+		assertEquals(-1, in.read());
+		assertEquals(-1, in.read());
+		assertEquals(-1, in.read());
+
+		assertEquals(data.length, (int) in.getPos());
+	}
+
+	@Test
+	public void testStreamSeekOutOfBounds() throws IOException {
+		final int len = 10;
+		final ByteStreamStateHandle handle = new ByteStreamStateHandle("name", new byte[len]);
+
+		// check negative offset
+		FSDataInputStream in = handle.openInputStream();
+		try {
+			in.seek(-2);
+			fail("should fail with an exception");
+		} catch (IOException e) {
+			// expected
+		}
+
+		// check seeking past the end of the data
+		in = handle.openInputStream();
+		try {
+			in.seek(len + 1);
+			fail("should fail with an exception");
+		} catch (IOException e) {
+			// expected
+		}
+
+		// check integer overflow
+		in = handle.openInputStream();
+		try {
+			in.seek(((long) Integer.MAX_VALUE) + 100L);
+			fail("should fail with an exception");
+		} catch (IOException e) {
+			// expected
+		}
+	}
+
+	@Test
+	public void testBulkRead() throws IOException {
+		final byte[] data = {34, 25, 22, 66};
+		final ByteStreamStateHandle handle = new ByteStreamStateHandle("name", data);
+		final int targetLen = 8;
+
+		for (int start = 0; start < data.length; start++) {
+			for (int num = 0; num < targetLen; num++) {
+				FSDataInputStream in = handle.openInputStream();
+				in.seek(start);
+
+				final byte[] target = new byte[targetLen];
+				final int read = in.read(target, targetLen - num, num);
+
+				assertEquals(Math.min(num, data.length - start), read);
+				for (int i = 0; i < read; i++) {
+					assertEquals(data[start + i], target[targetLen - num + i]);
+				}
+
+				int newPos = start + read;
+				assertEquals(newPos, (int) in.getPos());
+				assertEquals(newPos < data.length ? data[newPos] : -1, in.read());
+			}
+		}
+	}
+
+	@SuppressWarnings("ResultOfMethodCallIgnored")
+	@Test
+	public void testBulkReadIndexOutOfBounds() throws IOException {
+		final ByteStreamStateHandle handle = new ByteStreamStateHandle("name", new byte[10]);
+
+		// check negative offset
+		FSDataInputStream in = handle.openInputStream();
+		try {
+			in.read(new byte[10], -1, 5);
+			fail("should fail with an exception");
+		} catch (IndexOutOfBoundsException e) {
+			// expected
+		}
+
+		// check offset overflow
+		in = handle.openInputStream();
+		try {
+			in.read(new byte[10], 10, 5);
+			fail("should fail with an exception");
+		} catch (IndexOutOfBoundsException e) {
+			// expected
+		}
+
+		// check negative length
+		in = handle.openInputStream();
+		try {
+			in.read(new byte[10], 0, -2);
+			fail("should fail with an exception");
+		} catch (IndexOutOfBoundsException e) {
+			// expected
+		}
+
+		// check length too large
+		in = handle.openInputStream();
+		try {
+			in.read(new byte[10], 5, 6);
+			fail("should fail with an exception");
+		} catch (IndexOutOfBoundsException e) {
+			// expected
+		}
+
+		// check length integer overflow
+		in = handle.openInputStream();
+		try {
+			in.read(new byte[10], 5, Integer.MAX_VALUE);
+			fail("should fail with an exception");
+		} catch (IndexOutOfBoundsException e) {
+			// expected
+		}
+	}
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/1d38e0b4/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/StateInitializationContextImplTest.java
----------------------------------------------------------------------
diff --git a/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/StateInitializationContextImplTest.java b/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/StateInitializationContextImplTest.java
index 1ba2e77..700f7e1 100644
--- a/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/StateInitializationContextImplTest.java
+++ b/flink-streaming-java/src/test/java/org/apache/flink/streaming/api/operators/StateInitializationContextImplTest.java
@@ -38,7 +38,6 @@ import org.apache.flink.runtime.state.StateInitializationContextImpl;
 import org.apache.flink.runtime.state.StatePartitionStreamProvider;
 import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;
 import org.apache.flink.runtime.util.LongArrayList;
-import org.apache.flink.util.Preconditions;
 
 import org.junit.Assert;
 import org.junit.Before;
@@ -251,19 +250,20 @@ public class StateInitializationContextImplTest {
 
 		@Override
 		public FSDataInputStream openInputStream() throws IOException {
+			final FSDataInputStream original = super.openInputStream();
+
 			return new FSDataInputStream() {
-				private int index = 0;
+
 				private boolean closed = false;
 
 				@Override
 				public void seek(long desired) throws IOException {
-					Preconditions.checkArgument(desired >= 0 && desired < Integer.MAX_VALUE);
-					index = (int) desired;
+					original.seek(desired);
 				}
 
 				@Override
 				public long getPos() throws IOException {
-					return index;
+					return original.getPos();
 				}
 
 				@Override
@@ -271,12 +271,12 @@ public class StateInitializationContextImplTest {
 					if (closed) {
 						throw new IOException("Stream closed");
 					}
-					return index < data.length ? data[index++] & 0xFF : -1;
+					return original.read();
 				}
 
 				@Override
 				public void close() throws IOException {
-					super.close();
+					original.close();
 					this.closed = true;
 				}
 			};


[07/19] flink git commit: [hotfix] [core] Improve local fs exists() performance

Posted by se...@apache.org.
[hotfix] [core] Improve local fs exists() performance

This avoids going through an exception in the case of non-existing files.
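
For context, a sketch of the generic fallback that this override sidesteps (an assumption based on the usual FileSystem pattern, not quoted from the base class):

    // Hypothetical base-class behavior: existence is probed via getFileStatus(),
    // so a missing file costs a thrown-and-caught FileNotFoundException.
    public boolean exists(Path f) throws IOException {
        try {
            return getFileStatus(f) != null;
        } catch (FileNotFoundException e) {
            return false;
        }
    }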


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4319725c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4319725c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4319725c

Branch: refs/heads/master
Commit: 4319725cfccd40fb063d126fd5fc36dc5feec158
Parents: 6360875
Author: Stephan Ewen <se...@apache.org>
Authored: Fri Oct 27 19:23:51 2017 +0200
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:06 2018 +0100

----------------------------------------------------------------------
 .../java/org/apache/flink/core/fs/local/LocalFileSystem.java   | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/4319725c/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
index b1eb786..ee2ecbe 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
@@ -150,6 +150,12 @@ public class LocalFileSystem extends FileSystem {
 	}
 
 	@Override
+	public boolean exists(Path f) throws IOException {
+		final File path = pathToFile(f);
+		return path.exists();
+	}
+
+	@Override
 	public FileStatus[] listStatus(final Path f) throws IOException {
 
 		final File localf = pathToFile(f);


[03/19] flink git commit: [hotfix] Fix many many typos

Posted by se...@apache.org.
[hotfix] Fix many many typos

Fix typos from the IntelliJ "Typos" inspection.

This closes #5242


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/3bc293ef
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/3bc293ef
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/3bc293ef

Branch: refs/heads/master
Commit: 3bc293efc2fbf989c4b044a39a072de7d6f679ea
Parents: 0ae70ba
Author: Greg Hogan <co...@greghogan.com>
Authored: Wed Jan 3 14:19:58 2018 -0500
Committer: Stephan Ewen <se...@apache.org>
Committed: Fri Jan 5 19:38:05 2018 +0100

----------------------------------------------------------------------
 .../org/apache/flink/client/CliFrontend.java    |  2 +-
 .../elasticsearch/ElasticsearchSinkBase.java    |  2 +-
 .../testutils/SourceSinkDataTestKit.java        |  2 +-
 .../connectors/kafka/FlinkKafkaProducer011.java |  2 +-
 .../kafka/FlinkKafkaProducer011ITCase.java      |  2 +-
 .../connectors/kafka/FlinkKafkaProducer08.java  |  8 +-
 .../connectors/kafka/KafkaAvroTableSource.java  |  2 +-
 .../connectors/kafka/KafkaTableSource.java      |  4 +-
 .../kafka/config/OffsetCommitModes.java         |  2 +-
 .../kafka/internals/ClosableBlockingQueue.java  |  2 +-
 .../kafka/FlinkKafkaProducerBaseTest.java       |  2 +-
 .../connectors/kafka/KafkaConsumerTestBase.java |  2 +-
 .../connectors/kinesis/proxy/KinesisProxy.java  |  2 +-
 .../kinesis/FlinkKinesisProducerTest.java       |  2 +-
 .../rabbitmq/common/RMQConnectionConfig.java    |  2 +-
 .../hadoop/mapred/HadoopInputFormatBase.java    |  2 +-
 .../mapred/HadoopIOFormatsITCase.java           |  2 +-
 .../flink/addons/hbase/TableInputFormat.java    |  2 +-
 .../addons/hbase/HBaseConnectorITCase.java      |  2 +-
 .../docker-flink/create-docker-swarm-service.sh |  2 +-
 .../wikiedits/WikipediaEditsSourceTest.java     |  2 +-
 .../streaming/state/RocksDBStateBackend.java    |  4 +-
 .../storm/exclamation/ExclamationWithBolt.java  |  2 +-
 .../exclamation/ExclamationWithSpoutITCase.java |  2 +-
 .../storm/tests/StormFieldsGroupingITCase.java  |  2 +-
 .../org/apache/flink/storm/api/FlinkClient.java |  2 +-
 .../flink/storm/wrappers/BoltWrapper.java       | 26 +++---
 .../storm/wrappers/MergedInputsBoltWrapper.java | 12 +--
 .../flink/storm/wrappers/SpoutWrapper.java      | 20 ++---
 .../storm/wrappers/WrapperSetupHelper.java      |  4 +-
 .../flink/api/common/ExecutionConfig.java       |  2 +-
 .../api/common/io/DelimitedInputFormat.java     |  2 +-
 .../flink/api/common/io/FileInputFormat.java    |  2 +-
 .../api/common/state/ListStateDescriptor.java   |  2 +-
 .../api/common/state/MapStateDescriptor.java    |  2 +-
 .../TypeSerializerSerializationUtil.java        |  2 +-
 .../common/typeutils/base/CharComparator.java   |  2 +-
 .../flink/api/java/functions/KeySelector.java   |  2 +-
 .../flink/api/java/typeutils/TypeExtractor.java |  4 +-
 ...ryoRegistrationSerializerConfigSnapshot.java | 11 +--
 .../java/typeutils/runtime/PojoComparator.java  |  2 +-
 .../typeutils/runtime/kryo/KryoSerializer.java  |  2 +-
 .../flink/configuration/ConfigConstants.java    |  6 +-
 .../core/fs/local/LocalDataInputStream.java     |  2 +-
 .../java/org/apache/flink/types/CharValue.java  |  2 +-
 .../java/org/apache/flink/types/IntValue.java   |  2 +-
 .../org/apache/flink/util/StringBasedID.java    |  2 +-
 .../flink/configuration/MemorySizeTest.java     |  4 +-
 .../org/apache/hadoop/conf/Configuration.java   | 84 ++++++++++----------
 .../flink/api/java/io/CsvInputFormatTest.java   |  6 +-
 .../api/java/operator/MaxByOperatorTest.java    | 16 ++--
 .../api/java/operator/MinByOperatorTest.java    | 16 ++--
 .../examples/java8/wordcount/WordCount.java     |  2 +-
 .../examples/java8/wordcount/WordCount.java     |  2 +-
 .../java/operators/lambdas/FilterITCase.java    |  2 +-
 .../java/org/apache/flink/cep/nfa/NFATest.java  |  2 +-
 .../random/RandomGenerableFactory.java          |  2 +-
 .../apache/flink/ml/classification/SVM.scala    |  2 +-
 .../ml/outlier/StochasticOutlierSelection.scala |  4 +-
 .../apache/flink/ml/pipeline/Estimator.scala    |  2 +-
 .../apache/flink/ml/pipeline/Predictor.scala    |  2 +-
 .../apache/flink/ml/pipeline/Transformer.scala  | 10 +--
 .../flink/ml/preprocessing/Splitter.scala       |  2 +-
 .../apache/flink/ml/recommendation/ALS.scala    |  2 +-
 .../resources/tableSourceConverter.properties   |  4 +-
 .../flink/table/api/TableEnvironment.scala      |  2 +-
 .../apache/flink/table/api/TableSchema.scala    |  4 +-
 .../flink/table/calcite/FlinkTypeSystem.scala   |  6 +-
 .../flink/table/codegen/CodeGenerator.scala     |  2 +-
 .../flink/table/functions/ScalarFunction.scala  |  2 +-
 .../flink/table/functions/TableFunction.scala   |  2 +-
 .../table/functions/utils/AggSqlFunction.scala  |  2 +-
 .../utils/UserDefinedFunctionUtils.scala        |  2 +-
 .../table/plan/nodes/CommonCorrelate.scala      |  2 +-
 .../table/plan/nodes/dataset/DataSetJoin.scala  |  6 +-
 .../nodes/datastream/retractionTraits.scala     |  5 +-
 .../flink/table/plan/schema/InlineTable.scala   |  4 +-
 ...IncrementalAggregateTimeWindowFunction.scala |  2 +-
 .../IncrementalAggregateWindowFunction.scala    |  2 +-
 .../aggregate/RowTimeSortProcessFunction.scala  |  2 +-
 .../aggregate/RowTimeUnboundedOver.scala        |  4 +-
 .../table/runtime/join/WindowJoinUtil.scala     |  2 +-
 .../flink/table/sources/CsvTableSource.scala    |  1 -
 .../flink/table/sources/TableSourceUtil.scala   |  2 +-
 .../flink/table/expressions/RowTypeTest.scala   |  2 +-
 .../mesos/entrypoint/MesosEntrypointUtils.java  |  2 +-
 .../services/AbstractMesosServices.java         |  2 +-
 .../apache/flink/metrics/jmx/JMXReporter.java   |  2 +-
 .../optimizer/traversals/PlanFinalizer.java     |  2 +-
 .../optimizer/UnionPropertyPropagationTest.java |  2 +-
 .../runtime/webmonitor/RedirectHandlerTest.java |  2 +-
 ...istoryServerStaticFileServerHandlerTest.java |  2 +-
 .../web-dashboard/vendor-local/d3-timeline.js   |  2 +-
 .../flink/runtime/akka/FlinkUntypedActor.java   |  8 +-
 .../flink/runtime/blob/PermanentBlobCache.java  |  2 +-
 .../checkpoint/CheckpointCoordinator.java       |  2 +-
 .../checkpoint/PendingCheckpointStats.java      |  2 +-
 .../runtime/client/JobListeningContext.java     |  2 +-
 .../clusterframework/BootstrapTools.java        |  2 +-
 .../clusterframework/FlinkResourceManager.java  |  4 +-
 .../overlays/HadoopConfOverlay.java             |  2 +-
 .../overlays/KeytabOverlay.java                 |  2 +-
 .../clusterframework/types/ResourceProfile.java |  2 +-
 .../executiongraph/AccessExecutionGraph.java    |  4 +-
 .../flink/runtime/executiongraph/Execution.java |  6 +-
 .../runtime/executiongraph/ExecutionVertex.java |  2 +-
 .../FsNegativeRunningJobsRegistry.java          |  2 +-
 .../iterative/task/AbstractIterativeTask.java   |  2 +-
 .../flink/runtime/jobgraph/JobVertex.java       |  2 +-
 .../flink/runtime/jobmaster/SlotContext.java    |  2 +-
 .../jobmaster/slotpool/SlotPoolGateway.java     |  2 +-
 .../flink/runtime/operators/BatchTask.java      |  8 +-
 .../flink/runtime/operators/CoGroupDriver.java  |  4 +-
 .../operators/hash/InPlaceMutableHashTable.java |  2 +-
 .../rest/messages/ClusterConfigurationInfo.java |  2 +-
 .../state/AbstractKeyedStateBackend.java        |  2 +-
 .../state/DefaultOperatorStateBackend.java      |  2 +-
 .../runtime/state/SharedStateRegistry.java      |  2 +-
 .../taskexecutor/slot/TaskSlotTable.java        |  2 +-
 .../apache/flink/runtime/taskmanager/Task.java  |  2 +-
 .../apache/flink/runtime/akka/AkkaUtils.scala   |  2 +-
 .../runtime/messages/ArchiveMessages.scala      |  2 +-
 .../checkpoint/CheckpointCoordinatorTest.java   |  2 +-
 .../checkpoint/PendingCheckpointTest.java       |  2 +-
 ...ZooKeeperCompletedCheckpointStoreITCase.java |  2 +-
 .../client/JobClientActorRecoveryITCase.java    |  2 +-
 .../executiongraph/GlobalModVersionTest.java    |  2 +-
 .../runtime/heartbeat/HeartbeatManagerTest.java |  2 +-
 .../flink/runtime/io/disk/ChannelViewsTest.java |  2 +-
 .../io/network/NetworkEnvironmentTest.java      |  4 +-
 .../io/network/TaskEventDispatcherTest.java     |  2 +-
 .../runtime/jobmanager/JobManagerTest.java      |  2 +-
 .../metrics/groups/TaskMetricGroupTest.java     |  2 +-
 .../query/KvStateLocationRegistryTest.java      |  2 +-
 .../runtime/query/KvStateLocationTest.java      |  2 +-
 .../slotmanager/SlotManagerTest.java            |  2 +-
 .../checkpoints/CheckpointStatsCacheTest.java   |  2 +-
 .../runtime/state/SharedStateRegistryTest.java  |  2 +-
 .../runtime/state/StateBackendTestBase.java     |  2 +-
 .../taskexecutor/TaskManagerServicesTest.java   |  2 +-
 .../TestingTaskExecutorGateway.java             |  2 +-
 .../TestingJobManagerMessages.scala             |  2 +-
 .../flink/api/scala/ScalaShellITCase.scala      |  2 +-
 .../flink/api/scala/codegen/TypeAnalyzer.scala  |  2 +-
 .../flink/api/scala/MaxByOperatorTest.scala     | 16 ++--
 .../flink/api/scala/MinByOperatorTest.scala     | 16 ++--
 .../flink/streaming/api/CheckpointingMode.java  |  2 +-
 .../api/datastream/CoGroupedStreams.java        |  2 +-
 .../environment/StreamExecutionEnvironment.java |  2 +-
 .../flink/streaming/api/graph/StreamConfig.java |  2 +-
 .../api/graph/StreamGraphGenerator.java         |  4 +-
 .../api/operators/HeapInternalTimerService.java |  2 +-
 .../api/operators/async/queue/AsyncResult.java  |  2 +-
 .../queue/UnorderedStreamElementQueue.java      |  2 +-
 .../CoFeedbackTransformation.java               |  2 +-
 .../transformations/OneInputTransformation.java |  2 +-
 .../operators/windowing/MergingWindowSet.java   |  2 +-
 .../operators/windowing/WindowOperator.java     |  2 +-
 .../streaming/util/typeutils/FieldAccessor.java |  2 +-
 .../flink/streaming/api/DataStreamTest.java     |  2 +-
 .../windowing/MergingWindowSetTest.java         |  4 +-
 .../windowing/WindowOperatorContractTest.java   |  4 +-
 .../windowing/WindowOperatorMigrationTest.java  |  4 +-
 .../operators/windowing/WindowOperatorTest.java |  2 +-
 .../tasks/StreamTaskTerminationTest.java        |  2 +-
 .../streaming/runtime/tasks/StreamTaskTest.java |  2 +-
 .../streaming/api/scala/CoGroupedStreams.scala  |  2 +-
 .../jar/CheckpointedStreamingProgram.java       |  2 +-
 .../classloading/jar/CustomKvStateProgram.java  |  2 +-
 .../apache/flink/test/operators/JoinITCase.java |  2 +-
 .../ZooKeeperLeaderElectionITCase.java          |  2 +-
 .../test/streaming/runtime/TimestampITCase.java |  4 +-
 .../org/apache/flink/test/util/CoordVector.java |  8 +-
 .../ParallelSessionsEventGenerator.java         |  2 +-
 .../BatchScalaAPICompletenessTest.scala         |  2 +-
 .../api/scala/operators/GroupingTest.scala      |  2 +-
 .../flink/yarn/TestingYarnTaskManager.scala     |  2 +-
 .../flink/yarn/YarnClusterDescriptorTest.java   |  6 +-
 tools/create_release_files.sh                   |  2 +-
 tools/list_deps.py                              |  2 +-
 tools/merge_flink_pr.py                         |  2 +-
 181 files changed, 328 insertions(+), 329 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
----------------------------------------------------------------------
diff --git a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
index c535783..dff12f6 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
@@ -1164,7 +1164,7 @@ public class CliFrontend {
 
 	/**
 	 * Retrieves the loaded custom command-lines.
-	 * @return An unmodifiyable list of loaded custom command-lines.
+	 * @return An unmodifiable list of loaded custom command-lines.
 	 */
 	public static List<CustomCommandLine<?>> getCustomCommandLineList() {
 		return Collections.unmodifiableList(customCommandLines);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
index c49d726..fe4343f 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
@@ -142,7 +142,7 @@ public abstract class ElasticsearchSinkBase<T> extends RichSinkFunction<T> imple
 	/** The user specified config map that we forward to Elasticsearch when we create the {@link Client}. */
 	private final Map<String, String> userConfig;
 
-	/** The function that is used to construct mulitple {@link ActionRequest ActionRequests} from each incoming element. */
+	/** The function that is used to construct multiple {@link ActionRequest ActionRequests} from each incoming element. */
 	private final ElasticsearchSinkFunction<T> elasticsearchSinkFunction;
 
 	/** User-provided handler for failed {@link ActionRequest ActionRequests}. */

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
index 4e3d3e2..32498c6 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java
@@ -67,7 +67,7 @@ public class SourceSinkDataTestKit {
 	}
 
 	/**
-	 * A {@link ElasticsearchSinkFunction} that indexes each element it receives to a sepecified Elasticsearch index.
+	 * A {@link ElasticsearchSinkFunction} that indexes each element it receives to a specified Elasticsearch index.
 	 */
 	public static class TestElasticsearchSinkFunction implements ElasticsearchSinkFunction<Tuple2<Integer, String>> {
 		private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java
index b14e487..ccf11e7 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011.java
@@ -170,7 +170,7 @@ public class FlinkKafkaProducer011<IN>
 	public static final String KEY_DISABLE_METRICS = "flink.disable-metrics";
 
 	/**
-	 * Descriptor of the transacionalIds list.
+	 * Descriptor of the transactional IDs list.
 	 */
 	private static final ListStateDescriptor<NextTransactionalIdHint> NEXT_TRANSACTIONAL_ID_HINT_DESCRIPTOR =
 		new ListStateDescriptor<>("next-transactional-id-hint", TypeInformation.of(NextTransactionalIdHint.class));

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java b/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java
index 85735c8..3c3c86a 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer011ITCase.java
@@ -101,7 +101,7 @@ public class FlinkKafkaProducer011ITCase extends KafkaTestBase {
 				assertIsCausedBy(FlinkKafka011ErrorCode.PRODUCERS_POOL_EMPTY, ex);
 			}
 
-			// Resume transactions before testHrness1 is being closed (in case of failures close() might not be called)
+			// Resume transactions before testHarness1 is closed (in case of failures close() might not be called)
 			try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness2 = createTestHarness(topic)) {
 				testHarness2.setup();
 				// restore from snapshot1, transactions with records 43 and 44 should be aborted

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
index d2f17d2..20900f0 100644
--- a/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
+++ b/flink-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer08.java
@@ -75,7 +75,7 @@ public class FlinkKafkaProducer08<IN> extends FlinkKafkaProducerBase<IN>  {
 	 * @param topicId The topic to write data to
 	 * @param serializationSchema A (keyless) serializable serialization schema for turning user objects into a kafka-consumable byte[]
 	 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
-	 * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
+	 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 	 */
 	public FlinkKafkaProducer08(String topicId, SerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
 		this(topicId, new KeyedSerializationSchemaWrapper<>(serializationSchema), producerConfig, customPartitioner);
@@ -120,7 +120,7 @@ public class FlinkKafkaProducer08<IN> extends FlinkKafkaProducerBase<IN>  {
 	 * @param topicId The topic to write data to
 	 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 	 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
-	 * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
+	 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 	 */
 	public FlinkKafkaProducer08(String topicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
 		super(topicId, serializationSchema, producerConfig, customPartitioner);
@@ -134,7 +134,7 @@ public class FlinkKafkaProducer08<IN> extends FlinkKafkaProducerBase<IN>  {
 	 * @param topicId The topic to write data to
 	 * @param serializationSchema A (keyless) serializable serialization schema for turning user objects into a kafka-consumable byte[]
 	 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
-	 * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
+	 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 	 *
 	 * @deprecated This is a deprecated constructor that does not correctly handle partitioning when
 	 *             producing to multiple topics. Use
@@ -151,7 +151,7 @@ public class FlinkKafkaProducer08<IN> extends FlinkKafkaProducerBase<IN>  {
 	 * @param topicId The topic to write data to
 	 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 	 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
-	 * @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
+	 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 	 *
 	 * @deprecated This is a deprecated constructor that does not correctly handle partitioning when
 	 *             producing to multiple topics. Use

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
index 055b679..bf2e9db 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaAvroTableSource.java
@@ -156,7 +156,7 @@ public abstract class KafkaAvroTableSource extends KafkaTableSource implements D
 		private Map<String, String> fieldMapping;
 
 		/**
-		 * Sets the class of the Avro records that aree read from the Kafka topic.
+		 * Sets the class of the Avro records that are read from the Kafka topic.
 		 *
 		 * @param avroClass The class of the Avro records that are read from the Kafka topic.
 		 * @return The builder.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
index 2de16c0..385d6ad 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSource.java
@@ -313,7 +313,7 @@ public abstract class KafkaTableSource
 
 		/**
 		 * Configures a field of the table to be a processing time attribute.
-		 * The configured field must be present in the tabel schema and of type {@link Types#SQL_TIMESTAMP()}.
+		 * The configured field must be present in the table schema and of type {@link Types#SQL_TIMESTAMP()}.
 		 *
 		 * @param proctimeAttribute The name of the processing time attribute in the table schema.
 		 * @return The builder.
@@ -328,7 +328,7 @@ public abstract class KafkaTableSource
 
 		/**
 		 * Configures a field of the table to be a rowtime attribute.
-		 * The configured field must be present in the tabel schema and of type {@link Types#SQL_TIMESTAMP()}.
+		 * The configured field must be present in the table schema and of type {@link Types#SQL_TIMESTAMP()}.
 		 *
 		 * @param rowtimeAttribute The name of the rowtime attribute in the table schema.
 		 * @param timestampExtractor The {@link TimestampExtractor} to extract the rowtime attribute from the physical type.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
index 9e1d9d5..85dc263 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/config/OffsetCommitModes.java
@@ -29,7 +29,7 @@ public class OffsetCommitModes {
 	 * @param enableCommitOnCheckpoint whether or not committing on checkpoints is enabled.
 	 * @param enableCheckpointing whether or not checkpoint is enabled for the consumer.
 	 *
-	 * @return the offset commmit mode to use, based on the configuration values.
+	 * @return the offset commit mode to use, based on the configuration values.
 	 */
 	public static OffsetCommitMode fromConfiguration(
 			boolean enableAutoCommit,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
index da61dd0..db32733 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
+++ b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
@@ -70,7 +70,7 @@ public class ClosableBlockingQueue<E> {
 
 	/**
 	 * Creates a new empty queue, reserving space for at least the specified number
-	 * of elements. The queu can still grow, of more elements are added than the
+	 * of elements. The queue can still grow if more elements are added than the
 	 * reserved space.
 	 *
 	 * @param initialSize The number of elements to reserve space for.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
index d462953..ad118ae 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java
@@ -189,7 +189,7 @@ public class FlinkKafkaProducerBaseTest {
 	 * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
 	 * it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
 	 *
-	 * <p>Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds.
+	 * <p>Note that this test does not test that the snapshot method is blocked correctly when there are pending records.
 	 * The test for that is covered in testAtLeastOnceProducer.
 	 */
 	@SuppressWarnings("unchecked")

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
index 0a5608a..55a9c4d 100644
--- a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
+++ b/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java
@@ -406,7 +406,7 @@ public abstract class KafkaConsumerTestBase extends KafkaTestBase {
 		final String consumeExtraRecordsJobName = "Consume Extra Records Job";
 		final String writeExtraRecordsJobName = "Write Extra Records Job";
 
-		// seriliazation / deserialization schemas for writing and consuming the extra records
+		// serialization / deserialization schemas for writing and consuming the extra records
 		final TypeInformation<Tuple2<Integer, Integer>> resultType =
 			TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {});
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
index 89e9f04..7daaad2 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
@@ -58,7 +58,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * <p>NOTE:
  * In the AWS KCL library, there is a similar implementation - {@link com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy}.
  * This implementation differs mainly in that we can make operations to arbitrary Kinesis streams, which is a needed
- * functionality for the Flink Kinesis Connecter since the consumer may simultaneously read from multiple Kinesis streams.
+ * functionality for the Flink Kinesis Connector since the consumer may simultaneously read from multiple Kinesis streams.
  */
 public class KinesisProxy implements KinesisProxyInterface {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java
index 07c9cd7..86cefff 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java
@@ -167,7 +167,7 @@ public class FlinkKinesisProducerTest {
 	 * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
 	 * it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
 	 *
-	 * <p>Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds.
+	 * <p>Note that this test does not test that the snapshot method is blocked correctly when there are pending records.
 	 * The test for that is covered in testAtLeastOnceProducer.
 	 */
 	@SuppressWarnings("ResultOfMethodCallIgnored")

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
index cce800a..bbb48ce 100644
--- a/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
+++ b/flink-connectors/flink-connector-rabbitmq/src/main/java/org/apache/flink/streaming/connectors/rabbitmq/common/RMQConnectionConfig.java
@@ -355,7 +355,7 @@ public class RMQConnectionConfig implements Serializable {
 		/**
 		 * Convenience method for setting the fields in an AMQP URI: host,
 		 * port, username, password and virtual host.  If any part of the
-		 * URI is ommited, the ConnectionFactory's corresponding variable
+		 * URI is omitted, the ConnectionFactory's corresponding variable
 		 * is left unchanged.
 		 * @param uri is the AMQP URI containing the data
 		 * @return the Builder

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java
index 5c26a58..69ff2d1 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/HadoopInputFormatBase.java
@@ -53,7 +53,7 @@ import java.util.ArrayList;
  *
  * @param <K> Type of key
  * @param <V> Type of value
- * @param <T> The type iself
+ * @param <T> The type itself
  */
 @Internal
 public abstract class HadoopInputFormatBase<K, V, T> extends HadoopInputFormatCommonBase<T, HadoopInputSplit> {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java
index bbe6395..46102a2 100644
--- a/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java
+++ b/flink-connectors/flink-hadoop-compatibility/src/test/java/org/apache/flink/test/hadoopcompatibility/mapred/HadoopIOFormatsITCase.java
@@ -51,7 +51,7 @@ import java.util.Collection;
 import java.util.LinkedList;
 
 /**
- * Integraiton tests for Hadoop IO formats.
+ * Integration tests for Hadoop IO formats.
  */
 @RunWith(Parameterized.class)
 public class HadoopIOFormatsITCase extends JavaProgramTestBase {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
index 52fd012..ebe25c8 100644
--- a/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
+++ b/flink-connectors/flink-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
@@ -51,7 +51,7 @@ public abstract class TableInputFormat<T extends Tuple> extends AbstractTableInp
 	 * The output from HBase is always an instance of {@link Result}.
 	 * This method is to copy the data in the Result instance into the required {@link Tuple}
 	 * @param r The Result instance from HBase that needs to be converted
-	 * @return The approriate instance of {@link Tuple} that contains the needed information.
+	 * @return The appropriate instance of {@link Tuple} that contains the needed information.
 	 */
 	protected abstract T mapResultToTuple(Result r);
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
----------------------------------------------------------------------
diff --git a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
index 3da4230..1f64397 100644
--- a/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
+++ b/flink-connectors/flink-hbase/src/test/java/org/apache/flink/addons/hbase/HBaseConnectorITCase.java
@@ -312,7 +312,7 @@ public class HBaseConnectorITCase extends HBaseTestingClusterAutostarter {
 		}
 	}
 
-	// ######## TableInputFormate tests ############
+	// ######## TableInputFormat tests ############
 
 	class InputFormatForTestTable extends TableInputFormat<Tuple1<Integer>> {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/docker-flink/create-docker-swarm-service.sh
----------------------------------------------------------------------
diff --git a/flink-contrib/docker-flink/create-docker-swarm-service.sh b/flink-contrib/docker-flink/create-docker-swarm-service.sh
index 4393a70..0a9cc16 100755
--- a/flink-contrib/docker-flink/create-docker-swarm-service.sh
+++ b/flink-contrib/docker-flink/create-docker-swarm-service.sh
@@ -50,5 +50,5 @@ docker network create -d overlay ${OVERLAY_NETWORK_NAME}
 # Create the jobmanager service
 docker service create --name ${JOB_MANAGER_NAME} --env JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS} -p ${SERVICE_PORT}:8081 --network ${OVERLAY_NETWORK_NAME} ${IMAGE_NAME} jobmanager
 
-# Create the taskmanger service (scale this out as needed)
+# Create the taskmanager service (scale this out as needed)
 docker service create --name ${TASK_MANAGER_NAME} --env JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS} --network ${OVERLAY_NETWORK_NAME} ${IMAGE_NAME} taskmanager

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java b/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java
index f6fa8e0..44bf8ee 100644
--- a/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java
+++ b/flink-contrib/flink-connector-wikiedits/src/test/java/org/apache/flink/streaming/connectors/wikiedits/WikipediaEditsSourceTest.java
@@ -71,7 +71,7 @@ public class WikipediaEditsSourceTest {
 
 				// Execute the source in a different thread and collect events into the queue.
 				// We do this in a separate thread in order to not block the main test thread
-				// indefinitely in case that somethign bad happens (like not receiving any
+				// indefinitely in case something bad happens (like not receiving any
 				// events)
 				executorService.execute(() -> {
 					try {

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
index 6ec7ec8..79771f3 100644
--- a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
+++ b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
@@ -82,7 +82,7 @@ public class RocksDBStateBackend extends AbstractStateBackend {
 	/** The state backend that we use for creating checkpoint streams. */
 	private final AbstractStateBackend checkpointStreamBackend;
 
-	/** Operator identifier that is used to uniqueify the RocksDB storage path. */
+	/** Operator identifier that is used to uniquify the RocksDB storage path. */
 	private String operatorIdentifier;
 
 	/** JobID for uniquifying backup paths. */
@@ -202,7 +202,7 @@ public class RocksDBStateBackend extends AbstractStateBackend {
 	 * {@link AbstractStateBackend#createStreamFactory(JobID, String) checkpoint stream}.
 	 *
 	 * @param checkpointStreamBackend The backend to store the
-	 * @param enableIncrementalCheckpointing True if incremental checkponting is enabled
+	 * @param enableIncrementalCheckpointing True if incremental checkpointing is enabled
 	 */
 	public RocksDBStateBackend(AbstractStateBackend checkpointStreamBackend, boolean enableIncrementalCheckpointing) {
 		this.checkpointStreamBackend = requireNonNull(checkpointStreamBackend);

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java b/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java
index c31c36a..b6bb4d5 100644
--- a/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java
+++ b/flink-contrib/flink-storm-examples/src/main/java/org/apache/flink/storm/exclamation/ExclamationWithBolt.java
@@ -36,7 +36,7 @@ import org.apache.storm.utils.Utils;
  * <p>The input is a plain text file with lines separated by newline characters.
  *
  * <p>Usage:
- * <code>ExclamationWithmBolt &lt;text path&gt; &lt;result path&gt; &lt;number of exclamation marks&gt;</code><br>
+ * <code>ExclamationWithBolt &lt;text path&gt; &lt;result path&gt; &lt;number of exclamation marks&gt;</code><br>
  * If no parameters are provided, the program is run with default data from {@link WordCountData} with x=2.
  *
  * <p>This example shows how to:

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java
index 61310e8..64294d1 100644
--- a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java
+++ b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/exclamation/ExclamationWithSpoutITCase.java
@@ -23,7 +23,7 @@ import org.apache.flink.streaming.util.StreamingProgramTestBase;
 import org.apache.flink.test.testdata.WordCountData;
 
 /**
- * Test for the ExclamantionWithSpout example.
+ * Test for the ExclamationWithSpout example.
  */
 public class ExclamationWithSpoutITCase extends StreamingProgramTestBase {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java
index c861c9e..6e02d81 100644
--- a/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java
+++ b/flink-contrib/flink-storm-examples/src/test/java/org/apache/flink/storm/tests/StormFieldsGroupingITCase.java
@@ -77,7 +77,7 @@ public class StormFieldsGroupingITCase extends StreamingProgramTestBase {
 		Collections.sort(expectedResults);
 		System.out.println(actualResults);
 		for (int i = 0; i < actualResults.size(); ++i) {
-			//compare against actual results with removed prefex (as it depends e.g. on the hash function used)
+			//compare against actual results with the prefix removed (as it depends e.g. on the hash function used)
 			Assert.assertEquals(expectedResults.get(i), actualResults.get(i));
 		}
 	}

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java
index d53ca42..24cb0fc 100644
--- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java
+++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/api/FlinkClient.java
@@ -156,7 +156,7 @@ public class FlinkClient {
 		return this;
 	}
 
-	// The following methods are derived from "backtype.storm.generated.Nimubs.Client"
+	// The following methods are derived from "backtype.storm.generated.Nimbus.Client"
 
 	/**
 	 * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java
index 590faf3..ba2435e 100644
--- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java
+++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/BoltWrapper.java
@@ -75,7 +75,7 @@ public class BoltWrapper<IN, OUT> extends AbstractStreamOperator<OUT> implements
 
 	/** The IDs of the input streams for this bolt per producer task ID. */
 	private final HashMap<Integer, String> inputStreamIds = new HashMap<Integer, String>();
-	/** The IDs of the producres for this bolt per producer task ID.. */
+	/** The IDs of the producers for this bolt per producer task ID. */
 	private final HashMap<Integer, String> inputComponentIds = new HashMap<Integer, String>();
 	/** The schema (ie, ordered field names) of the input streams per producer taskID. */
 	private final HashMap<Integer, Fields> inputSchemas = new HashMap<Integer, Fields>();
@@ -131,8 +131,8 @@ public class BoltWrapper<IN, OUT> extends AbstractStreamOperator<OUT> implements
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not within range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [1;25].
 	 */
 	public BoltWrapper(final IRichBolt bolt, final String[] rawOutputs)
@@ -153,8 +153,8 @@ public class BoltWrapper<IN, OUT> extends AbstractStreamOperator<OUT> implements
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [1;25].
 	 */
 	public BoltWrapper(final IRichBolt bolt, final Collection<String> rawOutputs) throws IllegalArgumentException {
@@ -176,8 +176,8 @@ public class BoltWrapper<IN, OUT> extends AbstractStreamOperator<OUT> implements
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public BoltWrapper(
@@ -199,14 +199,14 @@ public class BoltWrapper<IN, OUT> extends AbstractStreamOperator<OUT> implements
 	 *            The Storm {@link IRichBolt bolt} to be used.
 	 * @param inputSchema
 	 *            The schema (ie, ordered field names) of the input stream. @throws IllegalArgumentException If
-	 *            {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *            {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *            {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *            {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 * @param rawOutputs
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public BoltWrapper(final IRichBolt bolt, final Fields inputSchema,
@@ -229,8 +229,8 @@ public class BoltWrapper<IN, OUT> extends AbstractStreamOperator<OUT> implements
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public BoltWrapper(final IRichBolt bolt, final String name, final String inputStreamId,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java
index 07abffc..88ae355 100644
--- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java
+++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/MergedInputsBoltWrapper.java
@@ -63,8 +63,8 @@ public final class MergedInputsBoltWrapper<IN, OUT> extends BoltWrapper<StormTup
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not within range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [1;25].
 	 */
 	public MergedInputsBoltWrapper(final IRichBolt bolt, final String[] rawOutputs)
@@ -85,8 +85,8 @@ public final class MergedInputsBoltWrapper<IN, OUT> extends BoltWrapper<StormTup
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [1;25].
 	 */
 	public MergedInputsBoltWrapper(final IRichBolt bolt, final Collection<String> rawOutputs)
@@ -109,8 +109,8 @@ public final class MergedInputsBoltWrapper<IN, OUT> extends BoltWrapper<StormTup
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public MergedInputsBoltWrapper(final IRichBolt bolt, final String name, final Collection<String> rawOutputs)

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java
index 6d37b29..882ba27 100644
--- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java
+++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/SpoutWrapper.java
@@ -115,8 +115,8 @@ public final class SpoutWrapper<OUT> extends RichParallelSourceFunction<OUT> imp
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type. (Can be {@code null}.)
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public SpoutWrapper(final IRichSpout spout, final String[] rawOutputs)
@@ -141,8 +141,8 @@ public final class SpoutWrapper<OUT> extends RichParallelSourceFunction<OUT> imp
 	 *            terminates if no tuple was emitted for the first time. If value is {@code null}, finite invocation is
 	 *            disabled.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public SpoutWrapper(final IRichSpout spout, final String[] rawOutputs,
@@ -163,8 +163,8 @@ public final class SpoutWrapper<OUT> extends RichParallelSourceFunction<OUT> imp
 	 *            Contains stream names if a single attribute output stream, should not be of type {@link Tuple1} but be
 	 *            of a raw type. (Can be {@code null}.)
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public SpoutWrapper(final IRichSpout spout, final Collection<String> rawOutputs)
@@ -189,8 +189,8 @@ public final class SpoutWrapper<OUT> extends RichParallelSourceFunction<OUT> imp
 	 *            terminates if no tuple was emitted for the first time. If value is {@code null}, finite invocation is
 	 *            disabled.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public SpoutWrapper(final IRichSpout spout, final Collection<String> rawOutputs,
@@ -217,8 +217,8 @@ public final class SpoutWrapper<OUT> extends RichParallelSourceFunction<OUT> imp
 	 *            terminates if no tuple was emitted for the first time. If value is {@code null}, finite invocation is
 	 *            disabled.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	public SpoutWrapper(final IRichSpout spout, final String name, final Collection<String> rawOutputs,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java
----------------------------------------------------------------------
diff --git a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java
index 1611211..1d3a544 100644
--- a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java
+++ b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/wrappers/WrapperSetupHelper.java
@@ -62,8 +62,8 @@ class WrapperSetupHelper {
 	 *            {@link org.apache.flink.api.java.tuple.Tuple1 Tuple1} but be of a raw type. (Can be {@code null}.)
 	 * @return The number of attributes to be used for each stream.
 	 * @throws IllegalArgumentException
-	 *             If {@code rawOuput} is {@code true} and the number of declared output attributes is not 1 or if
-	 *             {@code rawOuput} is {@code false} and the number of declared output attributes is not with range
+	 *             If {@code rawOutput} is {@code true} and the number of declared output attributes is not 1 or if
+	 *             {@code rawOutput} is {@code false} and the number of declared output attributes is not within range
 	 *             [0;25].
 	 */
 	static HashMap<String, Integer> getNumberOfAttributes(final IComponent spoutOrBolt,

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
index 9f39c46..fb888c0 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
@@ -205,7 +205,7 @@ public class ExecutionConfig implements Serializable, Archiveable<ArchivedExecut
 	}
 
 	/**
-	 * Sets the interval of the automatic watermark emission. Watermaks are used throughout
+	 * Sets the interval of the automatic watermark emission. Watermarks are used throughout
 	 * the streaming system to keep track of the progress of time. They are used, for example,
 	 * for time based windowing.
 	 *

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java b/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java
index 1d344b9..04b04b8 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/io/DelimitedInputFormat.java
@@ -588,7 +588,7 @@ public abstract class DelimitedInputFormat<OT> extends FileInputFormat<OT> imple
 			int startPos = this.readPos - delimPos;
 			int count;
 
-			// Search for next occurence of delimiter in read buffer.
+			// Search for next occurrence of delimiter in read buffer.
 			while (this.readPos < this.limit && delimPos < this.delimiter.length) {
 				if ((this.readBuffer[this.readPos]) == this.delimiter[delimPos]) {
 					// Found the expected delimiter character. Continue looking for the next character of delimiter.

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java b/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
index f43bd22..038a3c3 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
@@ -256,7 +256,7 @@ public abstract class FileInputFormat<OT> extends RichInputFormat<OT, FileInputS
 		// paths) to compute the preview graph. The following is a workaround for
 		// this situation and we should fix this.
 
-		// comment (Stephan Ewen) this should be no longer relevant with the current Java/Scalal APIs.
+		// comment (Stephan Ewen) this should be no longer relevant with the current Java/Scala APIs.
 		if (filePath.isEmpty()) {
 			setFilePath(new Path());
 			return;

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
index a50e25d..e59d6ee 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
@@ -31,7 +31,7 @@ import java.util.List;
  * is a list that can be appended and iterated over.
  * 
  * <p>Using {@code ListState} is typically more efficient than manually maintaining a list in a
- * {@link ValueState}, because the backing implementation can support efficient appends, rathern then
+ * {@link ValueState}, because the backing implementation can support efficient appends, rather than
  * replacing the full list on write.
  * 
  * <p>To create keyed list state (on a KeyedStream), use 

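To illustrate the appendable keyed state this javadoc contrasts with
ValueState, a hedged sketch (the descriptor and getListState are the
documented APIs; the surrounding function is illustrative):

    import org.apache.flink.api.common.functions.RichFlatMapFunction;
    import org.apache.flink.api.common.state.ListState;
    import org.apache.flink.api.common.state.ListStateDescriptor;
    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.util.Collector;

    public class CollectingFunction extends RichFlatMapFunction<Long, Long> {
        private transient ListState<Long> seen;

        @Override
        public void open(Configuration parameters) {
            seen = getRuntimeContext().getListState(
                    new ListStateDescriptor<>("seen-values", Long.class));
        }

        @Override
        public void flatMap(Long value, Collector<Long> out) throws Exception {
            // Efficient append; no read-modify-write of the whole list as a
            // ValueState<List<Long>> would require.
            seen.add(value);
            out.collect(value);
        }
    }
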
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java b/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
index d4a49f8..16c00cb 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/state/MapStateDescriptor.java
@@ -54,7 +54,7 @@ public class MapStateDescriptor<UK, UV> extends StateDescriptor<MapState<UK, UV>
 	}
 
 	/**
-	 * Create a new {@code MapStateDescriptor} with the given name and the given type informations.
+	 * Create a new {@code MapStateDescriptor} with the given name and the given type information.
 	 *
 	 * @param name The name of the {@code MapStateDescriptor}.
 	 * @param keyTypeInfo The type information for the keys in the state.

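A minimal sketch of the constructor this javadoc documents, assuming the usual
BasicTypeInfo constants for the key and value types:

    import org.apache.flink.api.common.state.MapStateDescriptor;
    import org.apache.flink.api.common.typeinfo.BasicTypeInfo;

    public class MapStateExample {
        static final MapStateDescriptor<String, Long> COUNTS = new MapStateDescriptor<>(
                "per-key-counts",
                BasicTypeInfo.STRING_TYPE_INFO, // type information for the map keys
                BasicTypeInfo.LONG_TYPE_INFO);  // type information for the map values
    }
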
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java
index c6291ad..c68bb79 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializerSerializationUtil.java
@@ -76,7 +76,7 @@ public class TypeSerializerSerializationUtil {
 
 	/**
 	 * An {@link ObjectInputStream} that ignores serialVersionUID mismatches when deserializing objects of
-	 * anonymous classes or our Scala serializer classes and also replaces occurences of GenericData.Array
+	 * anonymous classes or our Scala serializer classes and also replaces occurrences of GenericData.Array
 	 * (from Avro) by a dummy class so that the KryoSerializer can still be deserialized without
 	 * Avro being on the classpath.
 	 *

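Not Flink's actual implementation, but a minimal sketch of the pattern that
javadoc describes: an ObjectInputStream that substitutes a placeholder class
when the original class cannot be resolved on restore:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectStreamClass;
    import java.io.Serializable;

    public class TolerantObjectInputStream extends ObjectInputStream {
        public TolerantObjectInputStream(InputStream in) throws IOException {
            super(in);
        }

        @Override
        protected Class<?> resolveClass(ObjectStreamClass desc)
                throws IOException, ClassNotFoundException {
            try {
                return super.resolveClass(desc);
            } catch (ClassNotFoundException e) {
                // e.g. Avro's GenericData.Array missing from the classpath:
                // fall back to a dummy so deserialization can proceed.
                return Placeholder.class;
            }
        }

        public static class Placeholder implements Serializable {}
    }
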
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java
index f9e186a..3e36b4c 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/CharComparator.java
@@ -60,7 +60,7 @@ public final class CharComparator extends BasicTypeComparator<Character> {
 	@Override
 	public void putNormalizedKey(Character value, MemorySegment target, int offset, int numBytes) {
 		// note that the char is an unsigned data type in java and consequently needs
-		// no code that transforms the signed representation to an offsetted representation
+		// no code that transforms the signed representation to an offset representation
 		// that is equivalent to unsigned, when compared byte by byte
 		if (numBytes == 2) {
 			// default case, full normalized key

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java b/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java
index 63e76a9..4aa8469 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/functions/KeySelector.java
@@ -25,7 +25,7 @@ import java.io.Serializable;
 
 /**
  * The {@link KeySelector} allows to use deterministic objects for operations such as
- * reduce, reduceGroup, join, coGoup, etc. If invoked multiple times on the same object,
+ * reduce, reduceGroup, join, coGroup, etc. If invoked multiple times on the same object,
  * the returned key must be the same.
  * 
  * The extractor takes an object and returns the deterministic key for that object.

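A hedged example of a deterministic KeySelector as this contract requires,
with the key derived purely from the input object:

    import org.apache.flink.api.java.functions.KeySelector;
    import org.apache.flink.api.java.tuple.Tuple2;

    public class FirstFieldKey implements KeySelector<Tuple2<String, Integer>, String> {
        @Override
        public String getKey(Tuple2<String, Integer> value) {
            return value.f0; // same object in, same key out
        }
    }
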
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 4767838..d8f0712 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -481,7 +481,7 @@ public class TypeExtractor {
 				final int paramLen = exec.getParameterTypes().length;
 
 				final Method sam = TypeExtractionUtils.getSingleAbstractMethod(Partitioner.class);
-				// number of parameters the SAM of implemented interface has, the parameter indexing aplicates to this range
+				// number of parameters the SAM of implemented interface has; the parameter indexing applies to this range
 				final int baseParametersLen = sam.getParameterTypes().length;
 
 				final Type keyType = TypeExtractionUtils.extractTypeFromLambda(
@@ -581,7 +581,7 @@ public class TypeExtractor {
 
 				final Method sam = TypeExtractionUtils.getSingleAbstractMethod(baseClass);
 
-				// number of parameters the SAM of implemented interface has, the parameter indexing aplicates to this range
+				// number of parameters the SAM of implemented interface has; the parameter indexing applies to this range
 				final int baseParametersLen = sam.getParameterTypes().length;
 
 				// executable references "this" implicitly

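The "SAM" in these comments is the single abstract method of the implemented
functional interface. A generic sketch of locating it (not Flink's
TypeExtractionUtils, and it does not filter re-declared Object methods):

    import java.lang.reflect.Method;
    import java.lang.reflect.Modifier;

    public class SamLookup {
        static Method singleAbstractMethod(Class<?> functionalInterface) {
            Method sam = null;
            for (Method m : functionalInterface.getMethods()) {
                if (Modifier.isAbstract(m.getModifiers())) {
                    if (sam != null) {
                        throw new IllegalArgumentException(
                                functionalInterface + " has more than one abstract method");
                    }
                    sam = m; // candidate SAM; its parameter count bounds the indexing
                }
            }
            if (sam == null) {
                throw new IllegalArgumentException(functionalInterface + " has no abstract method");
            }
            return sam;
        }
    }
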
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java
index cdf6b23..ad003cc 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoRegistrationSerializerConfigSnapshot.java
@@ -18,10 +18,6 @@
 
 package org.apache.flink.api.java.typeutils.runtime;
 
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.Serializer;
-import com.esotericsoftware.kryo.io.Input;
-import com.esotericsoftware.kryo.io.Output;
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.typeutils.GenericTypeSerializerConfigSnapshot;
@@ -31,6 +27,11 @@ import org.apache.flink.core.memory.DataInputView;
 import org.apache.flink.core.memory.DataOutputView;
 import org.apache.flink.util.InstantiationUtil;
 import org.apache.flink.util.Preconditions;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -220,7 +221,7 @@ public abstract class KryoRegistrationSerializerConfigSnapshot<T> extends Generi
 	public static class DummyRegisteredClass implements Serializable {}
 
 	/**
-	 * Placeholder dummmy for a previously registered Kryo serializer that is no longer valid or in classpath on restore.
+	 * Placeholder dummy for a previously registered Kryo serializer that is no longer valid or in classpath on restore.
 	 */
 	public static class DummyKryoSerializerClass<RC> extends Serializer<RC> implements Serializable {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java
index a3f4280..ece790e 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoComparator.java
@@ -86,7 +86,7 @@ public final class PojoComparator<T> extends CompositeTypeComparator<T> implemen
 					inverted = k.invertNormalizedKey();
 				}
 				else if (k.invertNormalizedKey() != inverted) {
-					// if a successor does not agree on the invertion direction, it cannot be part of the normalized key
+					// if a successor does not agree on the inversion direction, it cannot be part of the normalized key
 					break;
 				}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
index 560e5b1..f60ce46 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
@@ -309,7 +309,7 @@ public class KryoSerializer<T> extends TypeSerializer<T> {
 	// --------------------------------------------------------------------------------------------
 
 	/**
-	 * Returns the Chill Kryo Serializer which is implictly added to the classpath via flink-runtime.
+	 * Returns the Chill Kryo Serializer which is implicitly added to the classpath via flink-runtime.
 	 * Falls back to the default Kryo serializer if it can't be found.
 	 * @return The Kryo serializer instance.
 	 */

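A hedged sketch of the reflective load-with-fallback pattern that javadoc
describes; the factory class and method names below are hypothetical
stand-ins, not the ones flink-runtime actually provides:

    import com.esotericsoftware.kryo.Kryo;

    public class KryoFactory {
        static Kryo createKryo() {
            try {
                // Hypothetical Chill-backed factory; present only when
                // flink-runtime is on the classpath.
                Class<?> factory = Class.forName("com.example.ChillKryoFactory");
                return (Kryo) factory.getMethod("newKryo").invoke(null);
            } catch (Exception e) {
                // Chill missing or incompatible: fall back to plain Kryo.
                return new Kryo();
            }
        }
    }
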
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java b/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java
index 50039ac..545765f 100644
--- a/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java
+++ b/flink-core/src/main/java/org/apache/flink/configuration/ConfigConstants.java
@@ -484,7 +484,7 @@ public final class ConfigConstants {
 	public static final String YARN_TASK_MANAGER_ENV_PREFIX = "yarn.taskmanager.env.";
 
 	/**
-	 * Template for the YARN container start incovation.
+	 * Template for the YARN container start invocation.
 	 */
 	public static final String YARN_CONTAINER_START_COMMAND_TEMPLATE =
 		"yarn.container-start-command-template";
@@ -594,7 +594,7 @@ public final class ConfigConstants {
 	// ------------------------ Hadoop Configuration ------------------------
 
 	/**
-	 * Path to hdfs-defaul.xml file
+	 * Path to hdfs-default.xml file
 	 *
 	 * @deprecated Use environment variable HADOOP_CONF_DIR instead.
 	 */
@@ -980,7 +980,7 @@ public final class ConfigConstants {
 
 	// --------------------------- High Availability --------------------------
 
-	/** Defines high availabilty mode used for the cluster execution ("NONE", "ZOOKEEPER") */
+	/** Defines high availability mode used for the cluster execution ("NONE", "ZOOKEEPER") */
 	@PublicEvolving
 	public static final String HA_MODE = "high-availability";
 

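For illustration, that key can be set programmatically (Configuration#setString
and the constant are existing APIs; "zookeeper" is just an example value):

    import org.apache.flink.configuration.ConfigConstants;
    import org.apache.flink.configuration.Configuration;

    public class HaConfigExample {
        static Configuration haConfig() {
            Configuration config = new Configuration();
            config.setString(ConfigConstants.HA_MODE, "zookeeper"); // or "NONE"
            return config;
        }
    }
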
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java
index 63017e3..ab70b82 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalDataInputStream.java
@@ -75,7 +75,7 @@ public class LocalDataInputStream extends FSDataInputStream {
 
 	@Override
 	public void close() throws IOException {
-		// Accoring to javadoc, this also closes the channel
+		// According to javadoc, this also closes the channel
 		this.fis.close();
 	}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/types/CharValue.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/types/CharValue.java b/flink-core/src/main/java/org/apache/flink/types/CharValue.java
index f800832..a815b77 100644
--- a/flink-core/src/main/java/org/apache/flink/types/CharValue.java
+++ b/flink-core/src/main/java/org/apache/flink/types/CharValue.java
@@ -124,7 +124,7 @@ public class CharValue implements NormalizableKey<CharValue>, ResettableValue<Ch
 	@Override
 	public void copyNormalizedKey(MemorySegment target, int offset, int len) {
 		// note that the char is an unsigned data type in java and consequently needs
-		// no code that transforms the signed representation to an offsetted representation
+		// no code that transforms the signed representation to an offset representation
 		// that is equivalent to unsigned, when compared byte by byte
 		if (len == 2) {
 			// default case, full normalized key

http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/types/IntValue.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/types/IntValue.java b/flink-core/src/main/java/org/apache/flink/types/IntValue.java
index 347fd1d..bd0b39d 100644
--- a/flink-core/src/main/java/org/apache/flink/types/IntValue.java
+++ b/flink-core/src/main/java/org/apache/flink/types/IntValue.java
@@ -123,7 +123,7 @@ public class IntValue implements NormalizableKey<IntValue>, ResettableValue<IntV
 
 	@Override
 	public void copyNormalizedKey(MemorySegment target, int offset, int len) {
-		// take out value and add the integer min value. This gets an offsetted
+		// take out value and add the integer min value. This gets an offset
 		// representation when interpreted as an unsigned integer (as is the case
 		// with normalized keys). write this value as big endian to ensure the
 		// most significant byte comes first.

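A worked sketch of the offsetting that comment describes: adding
Integer.MIN_VALUE flips the sign bit, so two's-complement ints order correctly
when their bytes are compared as unsigned values, most significant byte first:

    public class NormalizedIntKey {
        static void writeNormalizedKey(int value, byte[] target, int offset) {
            // Same bits as value + Integer.MIN_VALUE: flips the sign bit, so
            // e.g. -1 becomes 0x7FFFFFFF and 0 becomes 0x80000000, preserving
            // signed order under unsigned byte-wise comparison.
            int shifted = value ^ Integer.MIN_VALUE;
            target[offset]     = (byte) (shifted >>> 24); // big endian: MSB first
            target[offset + 1] = (byte) (shifted >>> 16);
            target[offset + 2] = (byte) (shifted >>> 8);
            target[offset + 3] = (byte) shifted;
        }
    }
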
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java b/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java
index 7245e61..fe03fa9 100644
--- a/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java
+++ b/flink-core/src/main/java/org/apache/flink/util/StringBasedID.java
@@ -34,7 +34,7 @@ public class StringBasedID implements Serializable {
 	private final String keyString;
 
 	/**
-	 * Protected constructor to enfore that subclassing.
+	 * Protected constructor to enforce subclassing.
 	 */
 	protected StringBasedID(String keyString) {
 		this.keyString = Preconditions.checkNotNull(keyString);

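A minimal sketch of the intent behind that protected constructor: instances
can only be created through a concrete subclass (the subclass name here is
illustrative):

    import org.apache.flink.util.StringBasedID;

    public class MyHandleID extends StringBasedID {
        public MyHandleID(String keyString) {
            super(keyString); // the only way to construct a StringBasedID
        }
    }
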
http://git-wip-us.apache.org/repos/asf/flink/blob/3bc293ef/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java
----------------------------------------------------------------------
diff --git a/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java b/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java
index dbdd96b..9df541e 100644
--- a/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java
+++ b/flink-core/src/test/java/org/apache/flink/configuration/MemorySizeTest.java
@@ -161,7 +161,7 @@ public class MemorySizeTest {
 			fail("exception expected");
 		} catch (IllegalArgumentException ignored) {}
 
-		// brank
+		// blank
 		try {
 			MemorySize.parseBytes("     ");
 			fail("exception expected");
@@ -185,7 +185,7 @@ public class MemorySizeTest {
 			fail("exception expected");
 		} catch (IllegalArgumentException ignored) {}
 
-		// negavive number
+		// negative number
 		try {
 			MemorySize.parseBytes("-100 bytes");
 			fail("exception expected");