Posted to commits@flink.apache.org by fh...@apache.org on 2015/10/21 14:13:34 UTC

[5/6] flink git commit: [FLINK-2876] Minutiae

[FLINK-2876] Minutiae

A collection of small documentation and grammar updates.

This closes #1277


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6666ea58
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6666ea58
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6666ea58

Branch: refs/heads/master
Commit: 6666ea58de3ef94032014d891c2de5671361a494
Parents: 7a959bc
Author: Greg Hogan <co...@greghogan.com>
Authored: Tue Oct 20 11:57:33 2015 -0400
Committer: Fabian Hueske <fh...@apache.org>
Committed: Wed Oct 21 13:37:03 2015 +0200

----------------------------------------------------------------------
 docs/apis/dataset_transformations.md            | 47 +++++++++-----------
 docs/apis/programming_guide.md                  | 12 ++---
 .../api/common/accumulators/Accumulator.java    |  2 +-
 .../common/accumulators/AverageAccumulator.java |  4 +-
 .../api/common/typeutils/TypeComparator.java    |  4 +-
 .../relational/EmptyFieldsCountAccumulator.java |  5 +--
 .../flink/api/java/ExecutionEnvironment.java    |  2 +-
 .../translation/Tuple3UnwrappingIterator.java   |  2 +-
 .../plantranslate/JobGraphGenerator.java        |  2 +-
 .../apache/flink/runtime/client/JobClient.java  |  2 +-
 .../iomanager/AsynchronousFileIOChannel.java    |  2 +-
 .../task/IterationIntermediateTask.java         |  2 +-
 .../flink/runtime/operators/BatchTask.java      |  2 +-
 .../operators/hash/MutableHashTable.java        |  2 +-
 .../sort/PartialOrderPriorityQueue.java         |  2 +-
 .../flink/runtime/taskmanager/TaskManager.scala |  2 +-
 pom.xml                                         |  2 +-
 17 files changed, 45 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/docs/apis/dataset_transformations.md
----------------------------------------------------------------------
diff --git a/docs/apis/dataset_transformations.md b/docs/apis/dataset_transformations.md
index feed121..cfa138c 100644
--- a/docs/apis/dataset_transformations.md
+++ b/docs/apis/dataset_transformations.md
@@ -240,18 +240,6 @@ This problem can be overcome by hinting the return type of `project` operator li
 DataSet<Tuple1<String>> ds2 = ds.<Tuple1<String>>project(0).distinct(0);
 ~~~
 
-### Transformations on Grouped DataSet
-
-The reduce operations can operate on grouped data sets. Specifying the key to
-be used for grouping can be done in many ways:
-
-- key expressions
-- a key-selector function
-- one or more field position keys (Tuple DataSet only)
-- Case Class fields (Case Classes only)
-
-Please look at the reduce examples to see how the grouping keys are specified.
-
 </div>
 <div data-lang="python" markdown="1">
 
@@ -259,15 +247,20 @@ Please look at the reduce examples to see how the grouping keys are specified.
 out = in.project(2,0);
 ~~~
 
+</div>
+</div>
+
 ### Transformations on Grouped DataSet
 
 The reduce operations can operate on grouped data sets. Specifying the key to
-be used for grouping can be done using one or more field position keys (Tuple DataSet only).
+be used for grouping can be done in many ways:
 
-Please look at the reduce examples to see how the grouping keys are specified.
+- key expressions
+- a key-selector function
+- one or more field position keys (Tuple DataSet only)
+- Case Class fields (Case Classes only)
 
-</div>
-</div>
+Please look at the reduce examples to see how the grouping keys are specified.
 
 ### Reduce on Grouped DataSet
 
@@ -679,9 +672,10 @@ an alternative WordCount implementation. In the implementation,
 
 ~~~java
 DataSet<String> input = [..] // The words received as input
-DataSet<String> groupedInput = input.groupBy(0); // group identical words
 
-DataSet<Tuple2<String, Integer>> combinedWords = groupedInput.combineGroup(new GroupCombineFunction<String, Tuple2<String, Integer>() {
+DataSet<Tuple2<String, Integer>> combinedWords = input
+  .groupBy(0) // group identical words
+  .combineGroup(new GroupCombineFunction<String, Tuple2<String, Integer>>() {
 
     public void combine(Iterable<String> words, Collector<Tuple2<String, Integer>> out) { // combine
         int count = 0;
@@ -692,9 +686,9 @@ DataSet<Tuple2<String, Integer>> combinedWords = groupedInput.combineGroup(new G
     }
 });
 
-DataSet<Tuple2<String, Integer>> groupedCombinedWords = combinedWords.groupBy(0); // group by words again
-
-DataSet<Tuple2<String, Integer>> output = combinedWords.reduceGroup(new GroupReduceFunction() { // group reduce with full data exchange
+DataSet<Tuple2<String, Integer>> output = combinedWords
+  .groupBy(0)                              // group by words again
+  .reduceGroup(new GroupReduceFunction() { // group reduce with full data exchange
 
     public void reduce(Iterable<Tuple2<String, Integer>> words, Collector<Tuple2<String, Integer>> out) {
         int count = 0;
@@ -711,9 +705,10 @@ DataSet<Tuple2<String, Integer>> output = combinedWords.reduceGroup(new GroupRed
 
 ~~~scala
 val input: DataSet[String] = [..] // The words received as input
-val groupedInput: DataSet[String] = input.groupBy(0)
 
-val combinedWords: DataSet[(String, Int)] = groupedInput.combineGroup {
+val combinedWords: DataSet[(String, Int)] = input
+  .groupBy(0)
+  .combineGroup {
     (words, out: Collector[(String, Int)]) =>
         var count = 0
         for (word <- words) {
@@ -722,9 +717,9 @@ val combinedWords: DataSet[(String, Int)] = groupedInput.combineGroup {
         out.collect(word, count)
 }
 
-val groupedCombinedWords: DataSet[(String, Int)] = combinedWords.groupBy(0)
-
-val output: DataSet[(String, Int)] = groupedInput.reduceGroup {
+val output: DataSet[(String, Int)] = combinedWords
+  .groupBy(0)
+  .reduceGroup {
     (words, out: Collector[(String, Int)]) =>
         var count = 0
         for ((word, Int) <- words) {

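The restored list above names four ways to specify grouping keys. As a rough sketch of the three Java variants — field position, key expression, and key-selector function — using hypothetical data (illustrative only, not part of this commit):

~~~java
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple2<String, Integer>> words = env.fromElements(
    new Tuple2<String, Integer>("flink", 1),
    new Tuple2<String, Integer>("flink", 2));

words.groupBy(0).sum(1);    // one or more field position keys (Tuple DataSets only)
words.groupBy("f0").sum(1); // key expression

// key-selector function; any reduce operation can follow the grouping
words.groupBy(new KeySelector<Tuple2<String, Integer>, String>() {
        public String getKey(Tuple2<String, Integer> t) { return t.f0; }
    })
    .reduce(new ReduceFunction<Tuple2<String, Integer>>() {
        public Tuple2<String, Integer> reduce(Tuple2<String, Integer> a, Tuple2<String, Integer> b) {
            return new Tuple2<String, Integer>(a.f0, a.f1 + b.f1);
        }
    });
~~~
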
http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/docs/apis/programming_guide.md
----------------------------------------------------------------------
diff --git a/docs/apis/programming_guide.md b/docs/apis/programming_guide.md
index a89f736..102b137 100644
--- a/docs/apis/programming_guide.md
+++ b/docs/apis/programming_guide.md
@@ -120,17 +120,17 @@ manually create the project, you can use the archetype and create a project by c
 <div class="codetabs" markdown="1">
 <div data-lang="java" markdown="1">
 {% highlight bash %}
-mvn archetype:generate /
-    -DarchetypeGroupId=org.apache.flink/
-    -DarchetypeArtifactId=flink-quickstart-java /
+mvn archetype:generate \
+    -DarchetypeGroupId=org.apache.flink \
+    -DarchetypeArtifactId=flink-quickstart-java \
     -DarchetypeVersion={{site.version }}
 {% endhighlight %}
 </div>
 <div data-lang="scala" markdown="1">
 {% highlight bash %}
-mvn archetype:generate /
-    -DarchetypeGroupId=org.apache.flink/
-    -DarchetypeArtifactId=flink-quickstart-scala /
+mvn archetype:generate \
+    -DarchetypeGroupId=org.apache.flink \
+    -DarchetypeArtifactId=flink-quickstart-scala \
     -DarchetypeVersion={{site.version }}
 {% endhighlight %}
 </div>

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-core/src/main/java/org/apache/flink/api/common/accumulators/Accumulator.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/accumulators/Accumulator.java b/flink-core/src/main/java/org/apache/flink/api/common/accumulators/Accumulator.java
index e49cc04..2ee5fa0 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/accumulators/Accumulator.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/accumulators/Accumulator.java
@@ -25,7 +25,7 @@ import java.io.Serializable;
  * and operators. Each parallel instance creates and updates its own accumulator object,
 * and the different parallel instances of the accumulator are later
  * merged by the system at the end of the job. The result can be obtained from the
- * result of a job execution, or from teh web runtime monitor.
+ * result of a job execution, or from the web runtime monitor.
  *
  * The accumulators are inspired by the Hadoop/MapReduce counters.
  * 

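For context on the accumulator lifecycle described above: each parallel instance registers and updates its own accumulator, and the system merges them when the job finishes. A minimal usage sketch, assuming the standard RichFunction and IntCounter APIs (the name "num-records" is hypothetical):

~~~java
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;

public class CountingMapper extends RichMapFunction<String, String> {
    private final IntCounter numRecords = new IntCounter();

    @Override
    public void open(Configuration parameters) {
        // each parallel instance registers its own accumulator under a shared name
        getRuntimeContext().addAccumulator("num-records", numRecords);
    }

    @Override
    public String map(String value) {
        numRecords.add(1); // updated locally, merged by the system at the end of the job
        return value;
    }
}

// After env.execute(), the merged value can be read from the job result:
//   JobExecutionResult result = env.execute("count records");
//   Integer total = result.getAccumulatorResult("num-records");
~~~
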
http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AverageAccumulator.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AverageAccumulator.java b/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AverageAccumulator.java
index 5ed3c26..0db1942 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AverageAccumulator.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/accumulators/AverageAccumulator.java
@@ -19,8 +19,8 @@
 package org.apache.flink.api.common.accumulators;
 
 /**
- * An accumulator that get the average values.
- * Input can be {@code long}, {@code integer}, {@code double} as the result is {@code double}.
+ * An accumulator that computes the average value.
+ * Input can be {@code long}, {@code integer}, or {@code double} and the result is {@code double}.
  */
 public class AverageAccumulator implements SimpleAccumulator<Double> {
 

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeComparator.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeComparator.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeComparator.java
index d017694..3a545e4 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeComparator.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeComparator.java
@@ -38,7 +38,7 @@ import org.apache.flink.core.memory.MemorySegment;
 * Implementing classes are stateful, because several methods require setting one record as the reference for
  * comparisons and later comparing a candidate against it. Therefore, the classes implementing this interface are
  * not thread safe. The runtime will ensure that no instance is used twice in different threads, but will create
- * a copy for that purpose. It is hence imperative that the copied created by the {@link #duplicate()} method
+ * a copy for that purpose. It is hence imperative that the copies created by the {@link #duplicate()} method
  * share no state with the instance from which they were copied: They have to be deep copies.  
  *
  * @see java.lang.Object#hashCode()
@@ -60,7 +60,7 @@ public abstract class TypeComparator<T> implements Serializable {
 	 * results in a rather uniform value distribution.
 	 * However, any collisions produced by this method cannot be undone. While it is NOT
 	 * important to create hash codes that cover the full spectrum of bits in the integer, it IS important 
-	 * to avoid collisions when combining two value as good as possible.
+	 * to avoid collisions when combining two values as much as possible.
 	 * 
 	 * @param record The record to be hashed.
 	 * @return A hash value for the record.

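On the point about avoiding collisions when combining two values: folding field hashes with a small prime multiplier is the usual approach, since a plain XOR maps the pairs (a, b) and (b, a) — and any a == b — to the same code. A hypothetical sketch, not the actual Flink comparator code:

~~~java
// combine two field hashes; the prime multiplier spreads the bits of the
// first hash before mixing in the second, unlike a symmetric XOR
public int hash(String keyField, int valueField) {
    int h = keyField.hashCode();
    h = 31 * h + valueField; // an Integer's hash code is the value itself
    return h;
}
~~~
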
http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-examples/flink-java-examples/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java
----------------------------------------------------------------------
diff --git a/flink-examples/flink-java-examples/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java b/flink-examples/flink-java-examples/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java
index 7fd5799..e7ac474 100644
--- a/flink-examples/flink-java-examples/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java
+++ b/flink-examples/flink-java-examples/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java
@@ -183,9 +183,8 @@ public class EmptyFieldsCountAccumulator {
 	}
 
 	/**
-	 * This accumulator lets you increase vector components distributedly. The {@link #add(Integer)} method lets you
-	 * increase the <i>n</i>-th vector component by 1, whereat <i>n</i> is the methods parameter. The size of the vector
-	 * is automatically managed.
+	 * This accumulator maintains a vector of counts. Calling {@link #add(Integer)} increments the
+	 * <i>n</i>-th vector component. The size of the vector is automatically managed.
 	 */
 	public static class VectorAccumulator implements Accumulator<Integer, ArrayList<Integer>> {
 

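To make the reworded javadoc concrete, a stand-alone sketch of an add method that grows the vector on demand (the real class also implements the Accumulator interface; the class name here is hypothetical):

~~~java
import java.util.ArrayList;

public class CountVector {
    private final ArrayList<Integer> vector = new ArrayList<Integer>();

    /** Increments the n-th vector component, growing the vector as needed. */
    public void add(int position) {
        while (vector.size() <= position) {
            vector.add(0); // new components start at zero
        }
        vector.set(position, vector.get(position) + 1);
    }
}
~~~
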
http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-java/src/main/java/org/apache/flink/api/java/ExecutionEnvironment.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/ExecutionEnvironment.java b/flink-java/src/main/java/org/apache/flink/api/java/ExecutionEnvironment.java
index aa7c0c4..283d6d4 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/ExecutionEnvironment.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/ExecutionEnvironment.java
@@ -791,7 +791,7 @@ public abstract class ExecutionEnvironment {
 	
 	/**
 	 * Creates a new data set that contains a sequence of numbers. The data set will be created in parallel,
-	 * so there is no guarantee about the oder of the elements.
+	 * so there is no guarantee about the order of the elements.
 	 * 
 	 * @param from The number to start at (inclusive).
 	 * @param to The number to stop at (inclusive).

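For reference, a minimal usage sketch of the method whose javadoc is fixed above; both endpoints are inclusive and, as the javadoc notes, element order is not guaranteed:

~~~java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Long> numbers = env.generateSequence(1, 100); // 1 through 100, created in parallel
~~~
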
http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3UnwrappingIterator.java
----------------------------------------------------------------------
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3UnwrappingIterator.java b/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3UnwrappingIterator.java
index 7e054dd..2ee55bf 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3UnwrappingIterator.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/operators/translation/Tuple3UnwrappingIterator.java
@@ -24,7 +24,7 @@ import org.apache.flink.api.java.tuple.Tuple3;
 import org.apache.flink.util.TraversableOnceException;
 
 /**
- * An iterator that reads 3-tuples (groupKey, sortKey, value) and returns only the values (thrid field).
+ * An iterator that reads 3-tuples (groupKey, sortKey, value) and returns only the values (third field).
 * The iterator also tracks the groupKeys, as the triples flow through it.
  */
 public class Tuple3UnwrappingIterator<T, K1, K2> implements Iterator<T>, Iterable<T>, java.io.Serializable {

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
index afd0682..c9140a5 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
@@ -150,7 +150,7 @@ public class JobGraphGenerator implements Visitor<PlanNode> {
 	
 	public JobGraphGenerator(Configuration config) {
 		this.defaultMaxFan = config.getInteger(ConfigConstants.DEFAULT_SPILLING_MAX_FAN_KEY, 
-				ConfigConstants.DEFAULT_SPILLING_MAX_FAN);
+			ConfigConstants.DEFAULT_SPILLING_MAX_FAN);
 		this.defaultSortSpillingThreshold = config.getFloat(ConfigConstants.DEFAULT_SORT_SPILLING_THRESHOLD_KEY,
 			ConfigConstants.DEFAULT_SORT_SPILLING_THRESHOLD);
 	}

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobClient.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobClient.java b/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobClient.java
index 0105632..b908eb1 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobClient.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/client/JobClient.java
@@ -190,7 +190,7 @@ public class JobClient {
 
 	/**
 	 * Submits a job in detached mode. The method sends the JobGraph to the
-	 * JobManager and waits for the answer whether teh job could be started or not.
+	 * JobManager and waits for the answer whether the job could be started or not.
 	 *
 	 * @param jobManagerGateway Gateway to the JobManager which will execute the jobs
 	 * @param jobGraph The job

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/AsynchronousFileIOChannel.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/AsynchronousFileIOChannel.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/AsynchronousFileIOChannel.java
index aefeddb..a41be64 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/AsynchronousFileIOChannel.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/AsynchronousFileIOChannel.java
@@ -467,7 +467,7 @@ final class FileSegmentReadRequest implements ReadRequest {
 
 			fileSegment = new FileSegment(fileChannel, position, length, isBuffer);
 
-			// Skip the binary dataa
+			// Skip the binary data
 			fileChannel.position(position + length);
 
 			hasReachedEndOfFile.set(fileChannel.size() - fileChannel.position() == 0);

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
index 60f0dcf..0d266c2 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/iterative/task/IterationIntermediateTask.java
@@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 
 /**
- * An intermediate iteration task, which runs a Driver}inside.
+ * An intermediate iteration task, which runs a {@link org.apache.flink.runtime.operators.PactDriver} inside.
  * <p>
 * It will propagate {@link EndOfSuperstepEvent}s and {@link TerminationEvent}s to its connected tasks. Furthermore,
  * intermediate tasks can also update the iteration state, either the workset or the solution set.

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
index 3f94109..c570458 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java
@@ -1189,7 +1189,7 @@ public class BatchTask<S extends Function, OT> extends AbstractInvokable impleme
 	/**
 	 * Creates the {@link Collector} for the given task, as described by the given configuration. The
 	 * output collector contains the writers that forward the data to the different tasks that the given task
-	 * is connected to. Each writer applies a the partitioning as described in the configuration.
+	 * is connected to. Each writer applies the partitioning as described in the configuration.
 	 *
 	 * @param task The task that the output collector is created for.
 	 * @param config The configuration describing the output shipping strategies.

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
index efaceea..0bf4433 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
@@ -49,7 +49,7 @@ import org.apache.flink.util.MutableObjectIterator;
 * spilling contents to disk when the memory is not sufficient. It does not need to know a priori 
  * how large the input will be.
  * 
- * <p>The design of this class follows on many parts the design presented in
+ * <p>The design of this class follows in many parts the design presented in
  * "Hash joins and hash teams in Microsoft SQL Server", by Goetz Graefe et al. In its current state, the
  * implementation lacks features like dynamic role reversal, partition tuning, or histogram guided partitioning.</p>
  * 

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java
index 8fd9188..82d3868 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java
@@ -33,7 +33,7 @@ import java.util.Queue;
 
 /**
  * This class implements a priority-queue, which maintains a partial
- * ordering of its elements such that the+ least element can always be found
+ * ordering of its elements such that the least element can always be found
 * in constant time. Put() and pop() operations require log(size) time.
  * 
  */

http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/flink-runtime/src/main/scala/org/apache/flink/runtime/taskmanager/TaskManager.scala
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/scala/org/apache/flink/runtime/taskmanager/TaskManager.scala b/flink-runtime/src/main/scala/org/apache/flink/runtime/taskmanager/TaskManager.scala
index d9d9596..41533ea 100644
--- a/flink-runtime/src/main/scala/org/apache/flink/runtime/taskmanager/TaskManager.scala
+++ b/flink-runtime/src/main/scala/org/apache/flink/runtime/taskmanager/TaskManager.scala
@@ -1943,7 +1943,7 @@ object TaskManager {
    * directories (not files), and are writable.
    *
    * @param tmpDirs The array of directory paths to check.
-   * @throws Exception Thrown if any of the directories doe not exist or is not writable
+   * @throws Exception Thrown if any of the directories does not exist or is not writable
    *                   or is a file, rather than a directory.
    */
   @throws(classOf[IOException])

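The contract described by the corrected javadoc, sketched in Java for illustration (the actual method is the Scala checkTempDirs in TaskManager.scala; this is an assumed equivalent, not the real implementation):

~~~java
import java.io.File;
import java.io.IOException;

static void checkTempDirs(String[] tmpDirs) throws IOException {
    for (String path : tmpDirs) {
        File dir = new File(path);
        if (!dir.exists()) {
            throw new IOException("Temporary directory does not exist: " + path);
        }
        if (!dir.isDirectory()) {
            throw new IOException("Temporary directory is not a directory: " + path);
        }
        if (!dir.canWrite()) {
            throw new IOException("Temporary directory is not writable: " + path);
        }
    }
}
~~~
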
http://git-wip-us.apache.org/repos/asf/flink/blob/6666ea58/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 00952c7..cf1a917 100644
--- a/pom.xml
+++ b/pom.xml
@@ -810,7 +810,7 @@ under the License.
 				<configuration>
 					<source>1.7</source>
 					<target>1.7</target>
-					<!-- The output of Xlint is not show by default, but we activate it for the QA bot
+					<!-- The output of Xlint is not shown by default, but we activate it for the QA bot
 					to be able to get more warnings -->
 					<compilerArgument>-Xlint:all</compilerArgument>
 				</configuration>