Posted to commits@beam.apache.org by dh...@apache.org on 2016/10/11 23:39:26 UTC
[1/3] incubator-beam git commit: checkstyle: improve Javadoc checking
Repository: incubator-beam
Updated Branches:
refs/heads/master 7c2124ba4 -> 135790bc9
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java
index 6f05993..229b1d2 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java
@@ -67,7 +67,7 @@ public class CombineFns {
* <p>The same {@link TupleTag} cannot be used in a composition multiple times.
*
* <p>Example:
- * <pre><code>
+ * <pre>{@code
* PCollection<KV<K, Integer>> latencies = ...;
*
* TupleTag<Integer> maxLatencyTag = new TupleTag<Integer>();
@@ -97,7 +97,7 @@ public class CombineFns {
* c.output(...some T...);
* }
* }));
- * </code></pre>
+ * }</pre>
*/
public static ComposeKeyedCombineFnBuilder composeKeyed() {
return new ComposeKeyedCombineFnBuilder();
@@ -110,7 +110,7 @@ public class CombineFns {
* <p>The same {@link TupleTag} cannot be used in a composition multiple times.
*
* <p>Example:
- * <pre><code>
+ * <pre>{@code
* PCollection<Integer> globalLatencies = ...;
*
* TupleTag<Integer> maxLatencyTag = new TupleTag<Integer>();
@@ -140,7 +140,7 @@ public class CombineFns {
* c.output(...some T...);
* }
* }));
- * </code></pre>
+ * }</pre>
*/
public static ComposeCombineFnBuilder compose() {
return new ComposeCombineFnBuilder();
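The <pre><code> to <pre>{@code ...} swaps above recur throughout this patch. They matter because inside HTML tags the angle brackets stay live, so generic types must be written with entities, whereas the {@code} inline tag renders its contents literally. A minimal illustration, not taken from the patch:

    /**
     * With HTML tags, generics must be escaped for the javadoc parser:
     *
     * <pre><code>
     * PCollection&lt;KV&lt;String, Integer&gt;&gt; counts = ...;
     * </code></pre>
     *
     * Inside the inline tag they can be written as plain Java:
     *
     * <pre>{@code
     * PCollection<KV<String, Integer>> counts = ...;
     * }</pre>
     */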
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java
index 9722360..3dd4fe2 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java
@@ -65,7 +65,7 @@ public class CombineWithContext {
* A combine function that has access to {@code PipelineOptions} and side inputs through
* {@code CombineWithContext.Context}.
*
- * See the equivalent {@link CombineFn} for details about combine functions.
+ * <p>See the equivalent {@link CombineFn} for details about combine functions.
*/
public abstract static class CombineFnWithContext<InputT, AccumT, OutputT>
extends CombineFnBase.AbstractGlobalCombineFn<InputT, AccumT, OutputT>
@@ -182,7 +182,7 @@ public class CombineWithContext {
* A keyed combine function that has access to {@code PipelineOptions} and side inputs through
* {@code CombineWithContext.Context}.
*
- * See the equivalent {@link KeyedCombineFn} for details about keyed combine functions.
+ * <p>See the equivalent {@link KeyedCombineFn} for details about keyed combine functions.
*/
public abstract static class KeyedCombineFnWithContext<K, InputT, AccumT, OutputT>
extends CombineFnBase.AbstractPerKeyCombineFn<K, InputT, AccumT, OutputT>
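The <p> insertions in this hunk are repeated through the rest of the patch: the Javadoc convention, enforced by a checkstyle check such as JavadocParagraph, is that every paragraph after the first begins with <p> attached directly to its first word, preceded by an empty comment line. Schematically:

    /**
     * Summary sentence; the first paragraph takes no <p> tag.
     *
     * <p>Each following paragraph starts with <p> immediately followed by
     * text; a detached "<p> text" or a missing tag fails the check.
     */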
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java
index 3a3da65..eaf68b2 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java
@@ -63,7 +63,7 @@ import org.apache.beam.sdk.values.PCollection.IsBounded;
* {@code Coder} of the values of the input.
*
* <p>Example of use:
- * <pre><code>
+ * <pre>{@code
* PCollection<KV<String, Doc>> urlDocPairs = ...;
* PCollection<KV<String, Iterable<Doc>>> urlToDocs =
* urlDocPairs.apply(GroupByKey.<String, Doc>create());
@@ -75,7 +75,7 @@ import org.apache.beam.sdk.values.PCollection.IsBounded;
* Iterable<Doc> docsWithThatUrl = c.element().getValue();
* ... process all docs having that url ...
* }}));
- * </code></pre>
+ * }</pre>
*
* <p>{@code GroupByKey} is a key primitive in data-parallel
* processing, since it is the main way to efficiently bring
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java
index 7f13649..83cceca 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java
@@ -36,15 +36,15 @@ import org.apache.beam.sdk.values.TimestampedValue;
* in a {@link PCollection}.
*
* <p>Example 1: compute the latest value for each session:
- * <pre><code>
+ * <pre>{@code
* PCollection<Long> input = ...;
* PCollection<Long> sessioned = input
* .apply(Window.<Long>into(Sessions.withGapDuration(Duration.standardMinutes(5)));
* PCollection<Long> latestValues = sessioned.apply(Latest.<Long>globally());
- * </code></pre>
+ * }</pre>
*
* <p>Example 2: track a latest computed value in an aggregator:
- * <pre><code>
+ * <pre>{@code
* class MyDoFn extends DoFn<String, String> {
* private Aggregator<TimestampedValue<Double>, Double> latestValue =
* createAggregator("latestValue", new Latest.LatestFn<Double>());
@@ -56,7 +56,7 @@ import org.apache.beam.sdk.values.TimestampedValue;
* // ..
* }
* }
- * </code></pre>
+ * }</pre>
*
* <p>For elements with the same timestamp, the element chosen for output is arbitrary.
*/
@@ -135,8 +135,8 @@ public class Latest {
}
/**
- * Returns a {@link PTransform} that takes as input a {@link PCollection<T>} and returns a
- * {@link PCollection<T>} whose contents is the latest element according to its event time, or
+ * Returns a {@link PTransform} that takes as input a {@code PCollection<T>} and returns a
+ * {@code PCollection<T>} whose content is the latest element according to its event time, or
* {@literal null} if there are no elements.
*
* @param <T> The type of the elements being combined.
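The Latest.java fix above replaces {@link PCollection<T>} with {@code PCollection<T>} because the javadoc tool cannot resolve a {@link} reference that carries type arguments; the link target must be the raw type. Two working forms, sketched here as a guide:

    /**
     * Option used by this patch: refer to the type as code, a {@code PCollection<T>}.
     * Alternative: link the raw type and attach a label, {@link PCollection PCollection}
     * (the JmsIO hunk later in this commit uses that label form).
     */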
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java
index 474efef..87c7095 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java
@@ -299,7 +299,7 @@ public abstract class OldDoFn<InputT, OutputT> implements Serializable, HasDispl
* timestamps can only be shifted forward into the future. For infinite
* skew, return {@code Duration.millis(Long.MAX_VALUE)}.
*
- * <p> Note that producing an element whose timestamp is less than the
+ * <p>Note that producing an element whose timestamp is less than the
* current timestamp may result in late data, i.e. returning a non-zero
* value here does not impact watermark calculations used for firing
* windows.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java
index 4a58141..2544a27 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java
@@ -275,9 +275,9 @@ public abstract class PTransform<InputT extends PInput, OutputT extends POutput>
* Returns the default {@code Coder} to use for the output of this
* single-output {@code PTransform} when applied to the given input.
*
- * @throws CannotProvideCoderException if none can be inferred.
- *
* <p>By default, always throws.
+ *
+ * @throws CannotProvideCoderException if none can be inferred.
*/
protected Coder<?> getDefaultOutputCoder(@SuppressWarnings("unused") InputT input)
throws CannotProvideCoderException {
@@ -288,9 +288,9 @@ public abstract class PTransform<InputT extends PInput, OutputT extends POutput>
* Returns the default {@code Coder} to use for the given output of
* this single-output {@code PTransform} when applied to the given input.
*
- * @throws CannotProvideCoderException if none can be inferred.
- *
* <p>By default, always throws.
+ *
+ * @throws CannotProvideCoderException if none can be inferred.
*/
public <T> Coder<T> getDefaultOutputCoder(
InputT input, @SuppressWarnings("unused") TypedPValue<T> output)
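The PTransform hunks reorder the comment because javadoc requires the main description, including any <p> paragraphs, to come before block tags such as @param, @return, and @throws; text placed after a block tag is folded into that tag. The corrected shape, as the patch applies it:

    /**
     * Returns the default Coder to use for the output of this transform.
     *
     * <p>By default, always throws.
     *
     * @throws CannotProvideCoderException if none can be inferred.
     */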
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java
index f9cb557..9d4c9a7 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java
@@ -84,14 +84,14 @@ import org.apache.beam.sdk.values.TypedPValue;
* provided, will be called on the discarded instance.</li>
* </ol>
*
- * Each of the calls to any of the {@link DoFn DoFn's} processing
+ * <p>Each of the calls to any of the {@link DoFn DoFn's} processing
* methods can produce zero or more output elements. All of the
* output elements from all of the {@link DoFn} instances
* are included in the output {@link PCollection}.
*
* <p>For example:
*
- * <pre><code>
+ * <pre>{@code
* PCollection<String> lines = ...;
* PCollection<String> words =
* lines.apply(ParDo.of(new DoFn<String, String>() {
@@ -110,7 +110,7 @@ import org.apache.beam.sdk.values.TypedPValue;
* Integer length = word.length();
* c.output(length);
* }}));
- * </code></pre>
+ * }</pre>
*
* <p>Each output element has the same timestamp and is in the same windows
* as its corresponding input element, and the output {@code PCollection}
@@ -146,7 +146,7 @@ import org.apache.beam.sdk.values.TypedPValue;
* the {@link DoFn} operations via {@link DoFn.ProcessContext#sideInput sideInput}.
* For example:
*
- * <pre><code>
+ * <pre>{@code
* PCollection<String> words = ...;
* PCollection<Integer> maxWordLengthCutOff = ...; // Singleton PCollection
* final PCollectionView<Integer> maxWordLengthCutOffView =
@@ -162,7 +162,7 @@ import org.apache.beam.sdk.values.TypedPValue;
* c.output(word);
* }
* }}));
- * </code></pre>
+ * }</pre>
*
* <h2>Side Outputs</h2>
*
@@ -179,7 +179,7 @@ import org.apache.beam.sdk.values.TypedPValue;
* {@link DoFn.Context#output}, while an element is added to a side output
* {@link PCollection} using {@link DoFn.Context#sideOutput}. For example:
*
- * <pre><code>
+ * <pre>{@code
* PCollection<String> words = ...;
* // Select words whose length is below a cut off,
* // plus the lengths of words that are above the cut off.
@@ -230,7 +230,7 @@ import org.apache.beam.sdk.values.TypedPValue;
* results.get(wordLengthsAboveCutOffTag);
* PCollection<String> markedWords =
* results.get(markedWordsTag);
- * </code></pre>
+ * }</pre>
*
* <h2>Properties May Be Specified In Any Order</h2>
*
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java
index 2744b14..709aa4a 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java
@@ -105,7 +105,7 @@ public class RemoveDuplicates<T> extends PTransform<PCollection<T>,
* A {@link RemoveDuplicates} {@link PTransform} that uses a {@link SerializableFunction} to
* obtain a representative value for each input element.
*
- * Construct via {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)}.
+ * <p>Construct via {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)}.
*
* @param <T> the type of input and output element
* @param <IdT> the type of representative values used to dedup
@@ -143,7 +143,8 @@ public class RemoveDuplicates<T> extends PTransform<PCollection<T>,
* Return a {@code WithRepresentativeValues} {@link PTransform} that is like this one, but with
* the specified output type descriptor.
*
- * Required for use of {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)}
+ * <p>Required for use of
+ * {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)}
* in Java 8 with a lambda as the fn.
*
* @param type a {@link TypeDescriptor} describing the representative type of this
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java
index 767e58e..981d047 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java
@@ -34,7 +34,7 @@ import org.apache.beam.sdk.values.PCollectionView;
* available in the SDK.
*
* @param <PrimitiveViewT> the type of the underlying primitive view, provided by the runner
- * <ViewT> the type of the value(s) accessible via this {@link PCollectionView}
+ * {@code <ViewT>} the type of the value(s) accessible via this {@link PCollectionView}
*/
public abstract class ViewFn<PrimitiveViewT, ViewT> implements Serializable {
/**
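For the ViewFn hunk: the second type parameter was listed as a bare <ViewT>, which the javadoc parser would swallow as an HTML tag. The patch wraps it in {@code}; the fully conventional form would give each type parameter its own tag, e.g.:

    /**
     * @param <PrimitiveViewT> the type of the underlying primitive view, provided by the runner
     * @param <ViewT> the type of the value(s) accessible via this PCollectionView
     */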
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java
index 8b061f6..de28ecb 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java
@@ -99,7 +99,7 @@ public class WithKeys<K, V> extends PTransform<PCollection<V>,
/**
* Return a {@link WithKeys} that is like this one with the specified key type descriptor.
*
- * For use with lambdas in Java 8, either this method must be called with an appropriate type
+ * <p>For use with lambdas in Java 8, either this method must be called with an appropriate type
* descriptor or {@link PCollection#setCoder(Coder)} must be called on the output
* {@link PCollection}.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java
index 0b92d9f..394666b 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java
@@ -163,7 +163,7 @@ public class DisplayData implements Serializable {
* }
* </code></pre>
*
- * Using {@code include(subcomponent)} will associate each of the registered items with the
+ * <p>Using {@code include(subcomponent)} will associate each of the registered items with the
* namespace of the {@code subcomponent} being registered. To register display data in the
* current namespace, such as from a base class implementation, use
* {@code subcomponent.populateDisplayData(builder)} instead.
@@ -224,7 +224,7 @@ public class DisplayData implements Serializable {
/**
* The key for the display item. Each display item is created with a key and value
- * via {@link DisplayData#item).
+ * via {@link DisplayData#item}.
*/
@JsonGetter("key")
public abstract String getKey();
@@ -254,8 +254,8 @@ public class DisplayData implements Serializable {
* value. For example, the {@link #getValue() value} for {@link Type#JAVA_CLASS} items contains
* the full class name with package, while the short value contains just the class name.
*
- * A {@link #getValue() value} will be provided for each display item, and some types may also
- * provide a short-value. If a short value is provided, display data consumers may
+ * <p>A {@link #getValue() value} will be provided for each display item, and some types may
+ * also provide a short-value. If a short value is provided, display data consumers may
* choose to display it instead of or in addition to the {@link #getValue() value}.
*/
@JsonGetter("shortValue")
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java
index 9de6759..eb6961c 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java
@@ -22,7 +22,7 @@ import org.apache.beam.sdk.transforms.DoFn;
/**
* Interface for invoking the {@code DoFn} processing methods.
*
- * Instantiating a {@link DoFnInvoker} associates it with a specific {@link DoFn} instance,
+ * <p>Instantiating a {@link DoFnInvoker} associates it with a specific {@link DoFn} instance,
* referred to as the bound {@link DoFn}.
*/
public interface DoFnInvoker<InputT, OutputT> {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java
index 9690be8..e2463d8 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java
@@ -30,7 +30,7 @@ import org.apache.beam.sdk.util.TimeDomain;
import org.joda.time.Instant;
/**
- * <p>{@code AfterWatermark} triggers fire based on progress of the system watermark. This time is a
+ * {@code AfterWatermark} triggers fire based on progress of the system watermark. This time is a
* lower-bound, sometimes heuristically established, on event times that have been fully processed
* by the pipeline.
*
@@ -54,7 +54,7 @@ import org.joda.time.Instant;
*
* <p>The watermark is the clock that defines {@link TimeDomain#EVENT_TIME}.
*
- * Additionaly firings before or after the watermark can be requested by calling
+ * <p>Additionally, firings before or after the watermark can be requested by calling
* {@code AfterWatermark.pastEndOfWindow.withEarlyFirings(OnceTrigger)} or
* {@code AfterWatermark.pastEndOfWindow.withLateFirings(OnceTrigger)}.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java
index e1f5d4d..5f20465 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java
@@ -25,8 +25,7 @@ import org.joda.time.Instant;
/**
* A trigger which never fires.
*
- * <p>
- * Using this trigger will only produce output when the watermark passes the end of the
+ * <p>Using this trigger will only produce output when the watermark passes the end of the
* {@link BoundedWindow window} plus the {@link Window#withAllowedLateness allowed
* lateness}.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java
index 727a492..7e712b2 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java
@@ -94,7 +94,7 @@ public final class PaneInfo {
* And a {@code LATE} pane cannot contain locally on-time elements.
* </ol>
*
- * However, note that:
+ * <p>However, note that:
* <ol>
* <li> An {@code ON_TIME} pane may contain locally late elements. It may even contain only
* locally late elements. Provided a locally late element finds its way into an {@code ON_TIME}
@@ -256,7 +256,7 @@ public final class PaneInfo {
/**
* The zero-based index of this trigger firing among non-speculative panes.
*
- * <p> This will return 0 for the first non-{@link Timing#EARLY} timer firing, 1 for the next one,
+ * <p>This will return 0 for the first non-{@link Timing#EARLY} timer firing, 1 for the next one,
* etc.
*
* <p>Always -1 for speculative data.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java
index ec21723..1eb56f7 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java
@@ -186,8 +186,7 @@ public class SlidingWindows extends NonMergingWindowFn<Object, IntervalWindow> {
/**
* Ensures that later sliding windows have an output time that is past the end of earlier windows.
*
- * <p>
- * If this is the earliest sliding window containing {@code inputTimestamp}, that's fine.
+ * <p>If this is the earliest sliding window containing {@code inputTimestamp}, that's fine.
* Otherwise, we pick the earliest time that doesn't overlap with earlier windows.
*/
@Experimental(Kind.OUTPUT_TIME)
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java
index 52b7858..57f7716 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java
@@ -40,11 +40,11 @@ import org.joda.time.Duration;
* {@link org.apache.beam.sdk.transforms.GroupByKey GroupByKeys},
* including one within composite transforms, will group by the combination of
* keys and windows.
-
+ *
* <p>See {@link org.apache.beam.sdk.transforms.GroupByKey}
* for more information about how grouping with windows works.
*
- * <h2> Windowing </h2>
+ * <h2>Windowing</h2>
*
* <p>Windowing a {@code PCollection} divides the elements into windows based
* on the associated event time for each element. This is especially useful
@@ -58,13 +58,13 @@ import org.joda.time.Duration;
* The following example demonstrates how to use {@code Window} in a pipeline
* that counts the number of occurrences of strings each minute:
*
- * <pre> {@code
+ * <pre>{@code
* PCollection<String> items = ...;
* PCollection<String> windowed_items = items.apply(
* Window.<String>into(FixedWindows.of(Duration.standardMinutes(1))));
* PCollection<KV<String, Long>> windowed_counts = windowed_items.apply(
* Count.<String>perElement());
- * } </pre>
+ * }</pre>
*
* <p>Let (data, timestamp) denote a data element along with its timestamp.
* Then, if the input to this pipeline consists of
@@ -83,7 +83,7 @@ import org.joda.time.Duration;
* <p>Additionally, custom {@link WindowFn}s can be created, by creating new
* subclasses of {@link WindowFn}.
*
- * <h2> Triggers </h2>
+ * <h2>Triggers</h2>
*
* <p>{@link Window.Bound#triggering(Trigger)} allows specifying a trigger to control when
* (in processing time) results for the given window can be produced. If unspecified, the default
@@ -103,7 +103,7 @@ import org.joda.time.Duration;
* (The use of watermark time to stop processing tends to be more robust if the data source is slow
* for a few days, etc.)
*
- * <pre> {@code
+ * <pre>{@code
* PCollection<String> items = ...;
* PCollection<String> windowed_items = items.apply(
* Window.<String>into(FixedWindows.of(Duration.standardMinutes(1)))
@@ -114,12 +114,12 @@ import org.joda.time.Duration;
* .withAllowedLateness(Duration.standardDays(1)));
* PCollection<KV<String, Long>> windowed_counts = windowed_items.apply(
* Count.<String>perElement());
- * } </pre>
+ * }</pre>
*
* <p>On the other hand, if we wanted to get early results every minute of processing
* time (for which there were new elements in the given window) we could do the following:
*
- * <pre> {@code
+ * <pre>{@code
* PCollection<String> windowed_items = items.apply(
* Window.<String>into(FixedWindows.of(Duration.standardMinutes(1))
* .triggering(
@@ -128,7 +128,7 @@ import org.joda.time.Duration;
* .withEarlyFirings(AfterProcessingTime
* .pastFirstElementInPane().plusDelayOf(Duration.standardMinutes(1))))
* .withAllowedLateness(Duration.ZERO));
- * } </pre>
+ * }</pre>
*
* <p>After a {@link org.apache.beam.sdk.transforms.GroupByKey} the trigger is set to
* a trigger that will preserve the intent of the upstream trigger. See
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java
index 9ee55ad..45bbe75 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java
@@ -37,12 +37,12 @@ import org.apache.beam.sdk.values.TupleTag;
* <p>BaseExecutionContext is generic to allow implementing subclasses to return a concrete subclass
* of {@link StepContext} from {@link #getOrCreateStepContext(String, String)} and
* {@link #getAllStepContexts()} without forcing each subclass to override the method, e.g.
- * <pre>
+ * <pre>{@code
* @Override
* StreamingModeExecutionContext.StepContext getOrCreateStepContext(...) {
* return (StreamingModeExecutionContext.StepContext) super.getOrCreateStepContext(...);
* }
- * </pre>
+ * }</pre>
*
* <p>When a subclass of {@code BaseExecutionContext} has been downcast, the return types of
* {@link #createStepContext(String, String)},
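A caveat on the BaseExecutionContext hunk: javadoc treats any line whose first character after the leading * is @ as the start of a block tag, even inside <pre>{@code ...}, so the @Override line above can still end the description early. A common workaround, shown as a sketch rather than as part of this patch, is to escape the at-sign with {@literal}:

    /**
     * <pre>
     * {@literal @}Override
     * StreamingModeExecutionContext.StepContext getOrCreateStepContext(...) {
     *   return (StreamingModeExecutionContext.StepContext) super.getOrCreateStepContext(...);
     * }
     * </pre>
     */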
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java
index 5a98f84..e2c7e42 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java
@@ -60,6 +60,7 @@ public class ExposedByteArrayOutputStream extends ByteArrayOutputStream {
* Write {@code b} to the stream and take the ownership of {@code b}.
* If the stream is empty, {@code b} itself will be used as the content of the stream and
* no content copy will be involved.
+ *
* <p><i>Note: After passing any byte array to this method, it must not be modified again.</i>
*
* @throws IOException
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java
index 0f2ecd0..a2a6e17 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java
@@ -31,8 +31,7 @@ import org.apache.beam.sdk.values.TypeDescriptor;
/**
* Gathers all panes of each window into exactly one output.
*
- * <p>
- * Note that this will delay the output of a window until the garbage collection time (when the
+ * <p>Note that this will delay the output of a window until the garbage collection time (when the
* watermark passes the end of the window plus allowed lateness) even if the upstream triggers
* closed the window earlier.
*/
@@ -41,10 +40,9 @@ public class GatherAllPanes<T>
/**
* Gathers all panes of each window into a single output element.
*
- * <p>
- * This will gather all output panes into a single element, which causes them to be colocated on a
- * single worker. As a result, this is only suitable for {@link PCollection PCollections} where
- * all of the output elements for each pane fit in memory, such as in tests.
+ * <p>This will gather all output panes into a single element, which causes them to be colocated
+ * on a single worker. As a result, this is only suitable for {@link PCollection PCollections}
+ * where all of the output elements for each pane fit in memory, such as in tests.
*/
public static <T> GatherAllPanes<T> globally() {
return new GatherAllPanes<>();
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java
index c537eb3..35d0f2d 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java
@@ -50,7 +50,7 @@ public class PerKeyCombineFnRunners {
/**
* An implementation of {@link PerKeyCombineFnRunner} with {@link KeyedCombineFn}.
*
- * It forwards functions calls to the {@link KeyedCombineFn}.
+ * <p>It forwards function calls to the {@link KeyedCombineFn}.
*/
private static class KeyedCombineFnRunner<K, InputT, AccumT, OutputT>
implements PerKeyCombineFnRunner<K, InputT, AccumT, OutputT> {
@@ -146,7 +146,7 @@ public class PerKeyCombineFnRunners {
/**
* An implementation of {@link PerKeyCombineFnRunner} with {@link KeyedCombineFnWithContext}.
*
- * It forwards functions calls to the {@link KeyedCombineFnWithContext}.
+ * <p>It forwards function calls to the {@link KeyedCombineFnWithContext}.
*/
private static class KeyedCombineFnWithContextRunner<K, InputT, AccumT, OutputT>
implements PerKeyCombineFnRunner<K, InputT, AccumT, OutputT> {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java
index bb6aa93..1ac5511 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java
@@ -86,6 +86,7 @@ public abstract class PubsubClient implements Closeable {
/**
* Return the timestamp (in ms since unix epoch) to use for a Pubsub message with {@code
* attributes} and {@code pubsubTimestamp}.
+ *
* <p>If {@code timestampLabel} is non-{@literal null} then the message attributes must contain
* that label, and the value of that label will be taken as the timestamp.
* Otherwise the timestamp will be taken from the Pubsub publish timestamp {@code
@@ -299,6 +300,7 @@ public abstract class PubsubClient implements Closeable {
/**
* A message to be sent to Pubsub.
+ *
* <p>NOTE: This class is {@link Serializable} only to support the {@link PubsubTestClient}.
* Java serialization is never used for non-test clients.
*/
@@ -357,6 +359,7 @@ public abstract class PubsubClient implements Closeable {
/**
* A message received from Pubsub.
+ *
* <p>NOTE: This class is {@link Serializable} only to support the {@link PubsubTestClient}.
* Java serialization is never used for non-test clients.
*/
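The PubsubClient paragraph above fixes spacing around a rule worth restating: when timestampLabel is non-null the message attributes must contain that label and its value supplies the timestamp; otherwise the Pubsub publish timestamp is used. A self-contained sketch of that rule (a hypothetical helper, not the class's actual code):

    import java.util.Map;

    class PubsubTimestampSketch {
      /** Returns the event timestamp in ms since the unix epoch. */
      static long extractTimestamp(
          String timestampLabel, Map<String, String> attributes, long pubsubTimestampMs) {
        if (timestampLabel == null) {
          // No label configured: fall back to the publish timestamp.
          return pubsubTimestampMs;
        }
        String value = attributes.get(timestampLabel);
        if (value == null) {
          throw new IllegalArgumentException(
              "message attributes must contain the timestamp label: " + timestampLabel);
        }
        return Long.parseLong(value);
      }
    }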
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java
index 6e5ba46..3fab151 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java
@@ -43,7 +43,7 @@ public class PubsubTestClient extends PubsubClient {
/**
* Mimic the state of the simulated Pubsub 'service'.
*
- * Note that the {@link PubsubTestClientFactory} is serialized/deserialized even when running
+ * <p>Note that the {@link PubsubTestClientFactory} is serialized/deserialized even when running
* test pipelines. Meanwhile it is valid for multiple {@link PubsubTestClient}s to be created
* from the same client factory and run in parallel. Thus we can't enforce aliasing of the
* following data structures over all clients and must resort to a static.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java
index dd3b773..161037d 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java
@@ -108,7 +108,7 @@ public interface TimerInternals {
* <li>However will never be behind the global input watermark for any following computation.
* </ol>
*
- * <p> In pictures:
+ * <p>In pictures:
* <pre>
* | | | | |
* | | D | C | B | A
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java
index 98987cd..1b3791d 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java
@@ -46,7 +46,7 @@ public interface PInput {
public Collection<? extends PValue> expand();
/**
- * <p>After building, finalizes this {@code PInput} to make it ready for
+ * After building, finalizes this {@code PInput} to make it ready for
* being used as an input to a {@link org.apache.beam.sdk.transforms.PTransform}.
*
* <p>Automatically invoked whenever {@code apply()} is invoked on
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java
index 498c3d0..7a78131 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java
@@ -180,8 +180,8 @@ public class TypeDescriptors {
* <pre>
* new TypeDescriptor<KV<K,V>>() {};
* </pre>
- * <p>
- * Example of use:
+ *
+ * <p>Example of use:
* <pre>
* {@code
* PCollection<String> words = ...;
@@ -211,8 +211,8 @@ public class TypeDescriptors {
* <pre>
* new TypeDescriptor<Set<E>>() {};
* </pre>
- * <p>
- * Example of use:
+ *
+ * <p>Example of use:
* <pre>
* {@code
* PCollection<String> words = ...;
@@ -239,8 +239,8 @@ public class TypeDescriptors {
* <pre>
* new TypeDescriptor<List<E>>() {};
* </pre>
- * <p>
- * Example of use:
+ *
+ * <p>Example of use:
* <pre>
* {@code
* PCollection<String> words = ...;
@@ -267,8 +267,8 @@ public class TypeDescriptors {
* <pre>
* new TypeDescriptor<Iterable<E>>() {};
* </pre>
- * <p>
- * Example of use:
+ *
+ * <p>Example of use:
* <pre>
* {@code
* PCollection<String> words = ...;
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java
index 810b6f1..2a321ec 100644
--- a/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java
@@ -28,7 +28,7 @@ import java.util.concurrent.locks.LockSupport;
* href="https://blogs.oracle.com/dholmes/entry/inside_the_hotspot_vm_clocks">
* article</a> goes into further detail about this issue.
*
- * This {@link Sleeper} uses {@link System#nanoTime}
+ * <p>This {@link Sleeper} uses {@link System#nanoTime}
* as the timing source and {@link LockSupport#parkNanos} as the wait method.
* Note that usage of this sleeper may impact performance because
* of the relatively more expensive methods being invoked when compared to
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java
index f208488..ac76b2e 100644
--- a/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java
@@ -365,7 +365,7 @@ public class DoFnTesterTest {
/**
* A {@link DoFn} that adds values to an aggregator and converts input to String in
- * {@link OldDoFn#processElement).
+ * {@link OldDoFn#processElement}.
*/
private static class CounterDoFn extends DoFn<Long, String> {
Aggregator<Long, Long> agg = createAggregator("ctr", new Sum.SumLongFn());
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java
index 7800108..6a9ea6b 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java
@@ -84,7 +84,7 @@ class BigQueryAvroUtils {
/**
* Utility function to convert from an Avro {@link GenericRecord} to a BigQuery {@link TableRow}.
*
- * See <a href="https://cloud.google.com/bigquery/exporting-data-from-bigquery#config">
+ * <p>See <a href="https://cloud.google.com/bigquery/exporting-data-from-bigquery#config">
* "Avro format"</a> for more information.
*/
static TableRow convertGenericRecordToTableRow(GenericRecord record, TableSchema schema) {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
index 5aa952c..5914ba2 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
@@ -134,6 +134,7 @@ import org.slf4j.LoggerFactory;
* <a href="https://developers.google.com/bigquery/">BigQuery</a> tables.
*
* <h3>Table References</h3>
+ *
* <p>A fully-qualified BigQuery table name consists of three components:
* <ul>
* <li>{@code projectId}: the Cloud project id (defaults to
@@ -155,6 +156,7 @@ import org.slf4j.LoggerFactory;
* </ul>
*
* <h3>Reading</h3>
+ *
* <p>To read from a BigQuery table, apply a {@link BigQueryIO.Read} transformation.
* This produces a {@link PCollection} of {@link TableRow TableRows} as output:
* <pre>{@code
@@ -177,6 +179,7 @@ import org.slf4j.LoggerFactory;
* Pipeline construction will fail with a validation error if neither or both are specified.
*
* <h3>Writing</h3>
+ *
* <p>To write to a BigQuery table, apply a {@link BigQueryIO.Write} transformation.
* This consumes a {@link PCollection} of {@link TableRow TableRows} as input.
* <pre>{@code
@@ -200,6 +203,7 @@ import org.slf4j.LoggerFactory;
* {@link Write.WriteDisposition#WRITE_APPEND}.
*
* <h3>Sharding BigQuery output tables</h3>
+ *
* <p>A common use case is to dynamically generate BigQuery table names based on
* the current window. To support this,
* {@link BigQueryIO.Write#to(SerializableFunction)}
@@ -224,6 +228,7 @@ import org.slf4j.LoggerFactory;
* <p>Per-window tables are not yet supported in batch mode.
*
* <h3>Permissions</h3>
+ *
* <p>Permission requirements depend on the {@link PipelineRunner} that is used to execute the
* Dataflow job. Please refer to the documentation of corresponding {@link PipelineRunner}s for
* more details.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java
index ca7e491..07dc06e 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java
@@ -104,7 +104,7 @@ interface BigQueryServices extends Serializable {
/**
* Gets the specified {@link Job} by the given {@link JobReference}.
*
- * Returns null if the job is not found.
+ * <p>Returns null if the job is not found.
*/
Job getJob(JobReference jobRef) throws IOException, InterruptedException;
}
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java
index c50c23a..635e222 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java
@@ -20,7 +20,7 @@ package org.apache.beam.sdk.io.gcp.datastore;
import org.apache.beam.sdk.annotations.Experimental;
/**
- * <p>{@link DatastoreIO} provides an API for reading from and writing to
+ * {@link DatastoreIO} provides an API for reading from and writing to
* <a href="https://developers.google.com/datastore/">Google Cloud Datastore</a> over different
* versions of the Cloud Datastore Client libraries.
*
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
index 45871f1..45b2d6f 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
@@ -87,7 +87,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * <p>{@link DatastoreV1} provides an API to Read, Write and Delete {@link PCollection PCollections}
+ * {@link DatastoreV1} provides an API to Read, Write and Delete {@link PCollection PCollections}
* of <a href="https://developers.google.com/datastore/">Google Cloud Datastore</a> version v1
* {@link Entity} objects.
*
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java
index 72ab7c2..4dd1608 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java
@@ -35,13 +35,13 @@ import org.junit.runners.JUnit4;
/**
* Integration tests for {@link DatastoreV1.Read.SplitQueryFn}.
*
- * <p> It is hard to mock the exact behavior of Cloud Datastore, especially for the statistics
+ * <p>It is hard to mock the exact behavior of Cloud Datastore, especially for the statistics
* queries. Also the fact that DatastoreIO falls back gracefully when querying statistics fails,
* makes it hard to catch these issues in production. This test here ensures we interact with
* the Cloud Datastore directly, query the actual stats and verify that the SplitQueryFn generates
* the expected number of query splits.
*
- * <p> These tests are brittle as they rely on statistics data in Cloud Datastore. If the data
+ * <p>These tests are brittle as they rely on statistics data in Cloud Datastore. If the data
* gets lost or changes then they will begin failing and this test should be disabled.
* At the time of writing, the Cloud Datastore has the following statistics,
* <ul>
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java
index 9998833..37ad064 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java
@@ -61,7 +61,7 @@ public class V1ReadIT {
/**
* An end-to-end test for {@link DatastoreV1.Read}.
*
- * Write some test entities to datastore and then run a dataflow pipeline that
+ * <p>Write some test entities to Cloud Datastore and then run a pipeline that
* reads and counts the total number of entities. Verify that the count matches the
* number of entities written.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java
index fa7c140..e97e80a 100644
--- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java
+++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java
@@ -54,7 +54,7 @@ public class V1WriteIT {
/**
* An end-to-end test for {@link DatastoreV1.Write}.
*
- * Write some test entities to datastore through a dataflow pipeline.
+ * <p>Write some test entities to Cloud Datastore.
* Read and count all the entities. Verify that the count matches the
* number of entities written.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java
index c71a58c..5b0c5b6 100644
--- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java
+++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java
@@ -85,7 +85,8 @@ import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
* }
* </pre>
*
- * Implementation note: Since Hadoop's {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
+ * <p>Implementation note: Since Hadoop's
+ * {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
* determines the input splits, this class extends {@link BoundedSource} rather than
* {@link org.apache.beam.sdk.io.OffsetBasedSource}, since the latter
* dictates input splits.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java
index f3569ea..96ba87a 100644
--- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java
+++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.io.Writable;
/**
* A {@code WritableCoder} is a {@link Coder} for a Java class that implements {@link Writable}.
*
- * <p> To use, specify the coder type on a PCollection:
+ * <p>To use, specify the coder type on a PCollection:
* <pre>
* {@code
* PCollection<MyRecord> records =
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java
index d37ced9..547413f 100644
--- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java
+++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.mapreduce.InputSplit;
/**
* Source for Avros on Hadoop/HDFS with Simple Authentication.
*
- * Allows to set arbitrary username as HDFS user, which is used for reading Avro from HDFS.
+ * <p>Allows setting an arbitrary username as the HDFS user, which is used for reading Avro from HDFS.
*/
public class SimpleAuthAvroHDFSFileSource<T> extends AvroHDFSFileSource<T> {
// keep this field to pass Hadoop user between workers
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java
index e2c2c90..28accfa 100644
--- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java
+++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.security.UserGroupInformation;
* A {@code Sink} for writing records to a Hadoop filesystem using a Hadoop file-based output
* format with Simple Authentication.
*
- * Allows arbitrary username as HDFS user, which is used for writing to HDFS.
+ * <p>Allows an arbitrary username as the HDFS user, which is used for writing to HDFS.
*
* @param <K> The type of keys to be written to the sink.
* @param <V> The type of values to be written to the sink.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java
index d2cab57..22191f0 100644
--- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java
+++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
/**
* Source for Hadoop/HDFS with Simple Authentication.
*
- * Allows to set arbitrary username as HDFS user, which is used for reading from HDFS.
+ * <p>Allows setting an arbitrary username as the HDFS user, which is used for reading from HDFS.
*/
public class SimpleAuthHDFSFileSource<K, V> extends HDFSFileSource<K, V> {
private final String username;
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java b/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java
index 1c35f6e..00b91ad 100644
--- a/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java
+++ b/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java
@@ -81,7 +81,7 @@ import org.slf4j.LoggerFactory;
*
* <h3>Writing to a JMS destination</h3>
*
- * JmsIO sink supports writing text messages to a JMS destination on a broker.
+ * <p>JmsIO sink supports writing text messages to a JMS destination on a broker.
* To configure a JMS sink, you must specify a {@link javax.jms.ConnectionFactory} and a
* {@link javax.jms.Destination} name.
* For instance:
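The instance example itself falls outside this hunk. A sketch of the configuration the text describes; the builder method names (withConnectionFactory, withQueue) and the ActiveMQ factory are assumptions, not confirmed by this diff:

    // Sketch only: method names and the broker URL are illustrative.
    ConnectionFactory connectionFactory =
        new ActiveMQConnectionFactory("tcp://localhost:61616");
    pipeline
        .apply(...)  // produces PCollection<String>
        .apply(JmsIO.write()
            .withConnectionFactory(connectionFactory)
            .withQueue("my-queue"));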
@@ -175,7 +175,7 @@ public class JmsIO {
* that they can be stored in all JNDI naming contexts. In addition, it is recommended that
* these implementations follow the JavaBeansTM design patterns."
*
- * So, a {@link ConnectionFactory} implementation is serializable.
+ * <p>So, a {@link ConnectionFactory} implementation is serializable.
*/
protected ConnectionFactory connectionFactory;
@Nullable
@@ -201,8 +201,8 @@ public class JmsIO {
}
/**
- * Creates an {@link UnboundedSource<JmsRecord, ?>} with the configuration in
- * {@link Read}. Primary use case is unit tests, should not be used in an
+ * Creates an {@link UnboundedSource UnboundedSource<JmsRecord, ?>} with the configuration
+ * in {@link Read}. Primary use case is unit tests; it should not be used in an
* application.
*/
@VisibleForTesting
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
index 6769b31..e26f7c5 100644
--- a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
+++ b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
@@ -108,7 +108,7 @@ import org.slf4j.LoggerFactory;
* <p>Although most applications consume a single topic, the source can be configured to consume
* multiple topics or even a specific set of {@link TopicPartition}s.
*
- * <p> To configure a Kafka source, you must specify at the minimum Kafka <tt>bootstrapServers</tt>
+ * <p>To configure a Kafka source, you must specify at the minimum Kafka <tt>bootstrapServers</tt>
* and one or more topics to consume. The following example illustrates various options for
* configuring the source:
*
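The source example continues outside this hunk. A sketch of a typical configuration; the builder names (withBootstrapServers, withTopics, withValueCoder, withoutMetadata) are assumptions based on the surrounding text:

    // Sketch only: method names and coders are illustrative.
    pipeline
        .apply(KafkaIO.read()
            .withBootstrapServers("broker_1:9092,broker_2:9092")  // required
            .withTopics(ImmutableList.of("topic_a", "topic_b"))   // required
            .withValueCoder(StringUtf8Coder.of())                 // optional value coder
            .withoutMetadata())                                   // drop Kafka metadata
        .apply(Values.<String>create());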
@@ -157,7 +157,7 @@ import org.slf4j.LoggerFactory;
*
* <h3>Writing to Kafka</h3>
*
- * KafkaIO sink supports writing key-value pairs to a Kafka topic. Users can also write
+ * <p>KafkaIO sink supports writing key-value pairs to a Kafka topic. Users can also write
* just the values. To configure a Kafka sink, you must specify at the minimum Kafka
* <tt>bootstrapServers</tt> and the topic to write to. The following example illustrates various
* options for configuring the sink:
@@ -179,7 +179,7 @@ import org.slf4j.LoggerFactory;
* );
* }</pre>
*
- * Often you might want to write just values without any keys to Kafka. Use {@code values()} to
+ * <p>Often you might want to write just values without any keys to Kafka. Use {@code values()} to
* write records with a default empty (null) key:
*
* <pre>{@code
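The values-only example continues outside this hunk. A sketch of what it describes, with assumed builder names:

    // Sketch only: writes a PCollection<String> as values; Kafka receives null keys.
    strings.apply(KafkaIO.write()
        .withBootstrapServers("broker_1:9092")
        .withTopic("results")
        .withValueCoder(StringUtf8Coder.of())
        .values());  // values() drops the key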
@@ -499,8 +499,8 @@ public class KafkaIO {
}
/**
- * Creates an {@link UnboundedSource<KafkaRecord<K, V>, ?>} with the configuration in
- * {@link TypedRead}. Primary use case is unit tests, should not be used in an
+ * Creates an {@link UnboundedSource UnboundedSource<KafkaRecord<K, V>, ?>} with the
+ * configuration in {@link TypedRead}. Primary use case is unit tests; it should not be used in an
* application.
*/
@VisibleForTesting
@@ -633,7 +633,7 @@ public class KafkaIO {
* {@code min(desiredNumSplits, totalNumPartitions)}, though better not to depend on the exact
* count.
*
- * <p> It is important to assign the partitions deterministically so that we can support
+ * <p>It is important to assign the partitions deterministically so that we can support
* resuming a split from last checkpoint. The Kafka partitions are sorted by
* {@code <topic, partition>} and then assigned to splits in round-robin order.
*/
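The deterministic assignment described above amounts to a sort followed by round-robin dealing. A sketch (variable names are illustrative, not the actual implementation):

    // Sketch only: sort by <topic, partition>, then deal out round-robin.
    List<TopicPartition> sorted = new ArrayList<>(partitions);
    Collections.sort(sorted, new Comparator<TopicPartition>() {
      @Override
      public int compare(TopicPartition a, TopicPartition b) {
        int byTopic = a.topic().compareTo(b.topic());
        return byTopic != 0 ? byTopic : Integer.compare(a.partition(), b.partition());
      }
    });
    List<List<TopicPartition>> assignments = new ArrayList<>();
    for (int i = 0; i < numSplits; i++) {
      assignments.add(new ArrayList<TopicPartition>());
    }
    for (int i = 0; i < sorted.size(); i++) {
      assignments.get(i % numSplits).add(sorted.get(i));  // split i gets every numSplits-th partition
    }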
@@ -1297,8 +1297,8 @@ public class KafkaIO {
}
/**
- * Same as Write<K, V> without a Key. Null is used for key as it is the convention is Kafka
- * when there is no key specified. Majority of Kafka writers don't specify a key.
+ * Same as {@code Write<K, V>} without a key. Null is used for the key, as that is the convention
+ * in Kafka when there is no key specified. The majority of Kafka writers don't specify a key.
*/
private static class KafkaValueWrite<V> extends PTransform<PCollection<V>, PDone> {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java
index 4515f38..4317a59 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java
@@ -19,7 +19,7 @@ package org.apache.beam.sdk.io.kinesis;
import java.util.NoSuchElementException;
-/***
+/**
* Similar to Guava {@code Optional}, but throws {@link NoSuchElementException} for a missing element.
*/
abstract class CustomOptional<T> {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java
index c0f00de..5a34d7d 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java
@@ -24,7 +24,7 @@ import com.google.common.base.Function;
import java.util.List;
import javax.annotation.Nullable;
-/***
+/**
* Represents the output of a 'get' operation on a Kinesis stream.
*/
class GetKinesisRecordsResult {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java
index 36c8953..c7fd7f6 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java
@@ -23,7 +23,7 @@ import java.io.Serializable;
/**
* Provides instances of {@link AmazonKinesis} interface.
*
- * Please note, that any instance of {@link KinesisClientProvider} must be
+ * <p>Please note that any instance of {@link KinesisClientProvider} must be
* {@link Serializable} to ensure it can be sent to worker machines.
*/
interface KinesisClientProvider extends Serializable {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java
index acff33f..945eff6 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java
@@ -71,13 +71,13 @@ import org.joda.time.Instant;
* (for example if you're using more sophisticated authorization methods like Amazon STS, etc.)
* you can do it by implementing the {@link KinesisClientProvider} class:
*
- * <pre>{@code}
+ * <pre>{@code
* public class MyCustomKinesisClientProvider implements KinesisClientProvider {
* @Override
* public AmazonKinesis get() {
* // set up your client here
* }
- * }
+ * }}
* </pre>
*
* Usage is pretty straightforward:
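The usage example continues outside this hunk. A sketch built from the factory methods visible below in this file; the chaining of from() and using(), and MyCustomKinesisClientProvider from the example above, are assumptions:

    // Sketch only.
    p.apply(KinesisIO.Read
        .from("streamName", InitialPositionInStream.LATEST)
        .using(new MyCustomKinesisClientProvider()));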
@@ -105,7 +105,7 @@ import org.joda.time.Instant;
*
*/
public final class KinesisIO {
- /***
+ /**
* A {@link PTransform} that reads from a Kinesis stream.
*/
public static final class Read {
@@ -118,7 +118,7 @@ public final class KinesisIO {
this.initialPosition = checkNotNull(initialPosition, "initialPosition");
}
- /***
+ /**
* Specify reading from streamName at some initial position.
*/
public static Read from(String streamName, InitialPositionInStream initialPosition) {
@@ -126,7 +126,7 @@ public final class KinesisIO {
checkNotNull(initialPosition, "initialPosition")));
}
- /***
+ /**
* Specify reading from streamName beginning at given {@link Instant}.
* This {@link Instant} must be in the past, i.e. before {@link Instant#now()}.
*/
@@ -135,7 +135,7 @@ public final class KinesisIO {
checkNotNull(initialTimestamp, "initialTimestamp")));
}
- /***
+ /**
* Allows specifying a custom {@link KinesisClientProvider}.
* {@link KinesisClientProvider} provides {@link AmazonKinesis} instances which are later
* used for communication with Kinesis.
@@ -149,7 +149,7 @@ public final class KinesisIO {
initialPosition));
}
- /***
+ /**
* Specify credential details and region to be used to read from Kinesis.
* If you need a more sophisticated credential protocol, then you should look at
* {@link Read#using(KinesisClientProvider)}.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java
index 219a705..2138094 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java
@@ -30,7 +30,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/***
+/**
* Reads data from multiple Kinesis shards in a single thread.
* It uses a simple round-robin algorithm when fetching data from shards.
*/
@@ -52,7 +52,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> {
this.source = source;
}
- /***
+ /**
* Generates initial checkpoint and instantiates iterators for shards.
*/
@Override
@@ -74,7 +74,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> {
return advance();
}
- /***
+ /**
* Moves to the next record in one of the shards.
* If the current shard iterator can be moved forward (i.e. there's a record present) then we do it.
* If not, we iterate over shards in a round-robin manner.
@@ -106,7 +106,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> {
return currentRecord.get();
}
- /***
+ /**
* When {@link KinesisReader} was advanced to the current record.
* We cannot use the approximate arrival timestamp given for each record by Kinesis as it
* is not guaranteed to be accurate - this could lead to marking some records as "late"
@@ -121,7 +121,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> {
public void close() throws IOException {
}
- /***
+ /**
* Current time.
* We cannot give a better approximation of the watermark with the current semantics of
* {@link KinesisReader#getCurrentTimestamp()}, because we don't know when the next
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java
index 663ba44..f0fa45d 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java
@@ -30,7 +30,7 @@ import java.util.List;
import javax.annotation.Nullable;
import org.apache.beam.sdk.io.UnboundedSource;
-/***
+/**
* Checkpoint representing the total progress in a set of shards in a single stream.
* The set of shards covered by {@link KinesisReaderCheckpoint} may or may not be equal to the
* set of all shards present in the stream.
@@ -59,7 +59,7 @@ class KinesisReaderCheckpoint implements Iterable<ShardCheckpoint>, UnboundedSou
}));
}
- /***
+ /**
* Splits the given multi-shard checkpoint into partitions of approximately equal size.
*
* @param desiredNumSplits - upper limit for number of partitions to generate.
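Partitioning into approximately equal parts can be sketched with Guava's Lists.partition; the variable shardCheckpoints is an assumption, and the actual implementation is outside this hunk:

    // Sketch only: ceil-divide so at most desiredNumSplits partitions are produced.
    int partitionSize =
        (shardCheckpoints.size() + desiredNumSplits - 1) / desiredNumSplits;
    List<List<ShardCheckpoint>> partitions =
        Lists.partition(shardCheckpoints, partitionSize);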
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java
index 5b13e31..fc087b5 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java
@@ -29,7 +29,7 @@ import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.coders.VarLongCoder;
import org.joda.time.Instant;
-/***
+/**
* A {@link Coder} for {@link KinesisRecord}.
*/
class KinesisRecordCoder extends AtomicCoder<KinesisRecord> {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java
index 62cba08..45e0b51 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java
@@ -29,7 +29,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/***
+/**
* Represents a source for a single stream in Kinesis.
*/
class KinesisSource extends UnboundedSource<KinesisRecord, KinesisReaderCheckpoint> {
@@ -50,7 +50,7 @@ class KinesisSource extends UnboundedSource<KinesisRecord, KinesisReaderCheckpoi
validate();
}
- /***
+ /**
* Generates splits for reading from the stream.
* Basically, it'll try to evenly split the set of shards in the stream into
* {@code desiredNumSplits} partitions. Each partition is then a split.
@@ -71,7 +71,7 @@ class KinesisSource extends UnboundedSource<KinesisRecord, KinesisReaderCheckpoi
return sources;
}
- /***
+ /**
* Creates a reader based on the given {@link KinesisReaderCheckpoint}.
* If {@link KinesisReaderCheckpoint} is not given, then we use
* {@code initialCheckpointGenerator} to generate a new checkpoint.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java
index 4c7f39a..40e65fc 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java
@@ -24,9 +24,9 @@ import java.util.List;
/**
* Filters out records which were already processed and checkpointed.
- * <p>
- * We need this step, because we can get iterators from Kinesis only with "sequenceNumber" accuracy,
- * not with "subSequenceNumber" accuracy.
+ *
+ * <p>We need this step, because we can get iterators from Kinesis only with "sequenceNumber"
+ * accuracy, not with "subSequenceNumber" accuracy.
*/
class RecordFilter {
public List<KinesisRecord> apply(List<KinesisRecord> records, ShardCheckpoint checkpoint) {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java
index 7adae4b..e4ff541 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java
@@ -23,7 +23,7 @@ import static com.google.common.collect.Queues.newArrayDeque;
import java.util.Deque;
import java.util.Iterator;
-/***
+/**
* A very simple implementation of the round-robin algorithm.
*/
class RoundRobin<T> implements Iterable<T> {
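The round-robin idea can be sketched using the Deque import visible above; this is a sketch of the mechanism, not the actual class body:

    // Sketch only: rotate the deque by one on each call so elements are served in turn.
    class RoundRobinSketch<T> {
      private final Deque<T> deque;

      RoundRobinSketch(Iterable<T> elements) {
        this.deque = newArrayDeque(elements);  // Guava Queues.newArrayDeque
      }

      T next() {
        deque.addLast(deque.removeFirst());    // move head to tail
        return deque.getLast();                // the element just rotated is "current"
      }
    }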
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java
index 9920aca..6aa3504 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java
@@ -31,7 +31,7 @@ import java.io.Serializable;
import org.joda.time.Instant;
-/***
+/**
* Checkpoint mark for a single shard in the stream.
* Current position in the shard is determined by either:
* <ul>
@@ -96,7 +96,7 @@ class ShardCheckpoint implements Serializable {
this.timestamp = timestamp;
}
- /***
+ /**
* Used to compare {@link ShardCheckpoint} object to {@link KinesisRecord}. Depending
* on the underlying shardIteratorType, it will either compare the timestamp or the
* {@link ExtendedSequenceNumber}.
@@ -151,7 +151,7 @@ class ShardCheckpoint implements Serializable {
return shardIteratorType == AFTER_SEQUENCE_NUMBER && subSequenceNumber != null;
}
- /***
+ /**
* Used to advance the checkpoint mark to the position after the given {@link Record}.
*
* @param record
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java
index d17996a..872f604 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java
@@ -25,7 +25,7 @@ import java.util.Deque;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/***
+/**
* Iterates over records in a single shard.
* Under the hood, records are retrieved from Kinesis in batches and stored in an in-memory queue.
* Then the caller of {@link ShardRecordsIterator#next()} can read from the queue one by one.
@@ -56,7 +56,7 @@ class ShardRecordsIterator {
shardIterator = checkpoint.getShardIterator(kinesis);
}
- /***
+ /**
* Returns a record if one is present.
* Returns absent() if there are no new records at this time in the shard.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java
index 96267d1..3e3984a 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java
@@ -36,7 +36,7 @@ import java.util.List;
import java.util.concurrent.Callable;
import org.joda.time.Instant;
-/***
+/**
* Wraps the {@link AmazonKinesis} class, providing a much simpler interface and
* proper error handling.
*/
@@ -91,7 +91,7 @@ class SimplifiedKinesisClient {
});
}
- /***
+ /**
* Gets records from Kinesis and deaggregates them if needed.
*
* @return list of deaggregated records
@@ -102,7 +102,7 @@ class SimplifiedKinesisClient {
return getRecords(shardIterator, streamName, shardId, null);
}
- /***
+ /**
* Gets records from Kinesis and deaggregates them if needed.
*
* @return list of deaggregated records
@@ -126,7 +126,7 @@ class SimplifiedKinesisClient {
});
}
- /***
+ /**
* Wraps Amazon-specific exceptions into a more friendly format.
*
* @throws TransientKinesisException - in case of a recoverable situation, i.e.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java
index b7ee917..d8842c4 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java
@@ -26,7 +26,7 @@ import java.io.Serializable;
import java.util.Objects;
import org.joda.time.Instant;
-/***
+/**
* Denotes a point at which the reader should start reading from a Kinesis stream.
* It can be expressed either as an {@link InitialPositionInStream} enum constant or a timestamp,
* in which case the reader will start reading at the specified point in time.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java
index 65a7605..324de46 100644
--- a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java
+++ b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java
@@ -21,7 +21,7 @@ import org.apache.beam.sdk.options.Default;
import org.apache.beam.sdk.options.Description;
import org.apache.beam.sdk.testing.TestPipelineOptions;
-/***
+/**
* Options for Kinesis integration tests.
*/
public interface KinesisTestOptions extends TestPipelineOptions {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java
index b1c212b..7518ff7 100644
--- a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java
+++ b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java
@@ -32,7 +32,7 @@ import com.google.common.collect.Lists;
import java.nio.ByteBuffer;
import java.util.List;
-/***
+/**
* Sends records to Kinesis in a reliable way.
*/
public class KinesisUploader {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java
index bdf0e53..8c9a65c 100644
--- a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java
+++ b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java
@@ -66,19 +66,17 @@ import org.joda.time.Instant;
* and the bucket name. If unspecified, the default values from the GridFS driver are used.</p>
*
* <p>The following example illustrates various options for configuring the
- * source:</p>
+ * source:
*
* <pre>{@code
- *
* pipeline.apply(MongoDbGridFSIO.<String>read()
* .withUri("mongodb://localhost:27017")
* .withDatabase("my-database")
* .withBucket("my-bucket"))
- *
* }</pre>
*
* <p>The source also accepts an optional configuration: {@code withQueryFilter()} allows you to
- * define a JSON filter to get subset of files in the database.</p>
+ * define a JSON filter to get a subset of files in the database.
*
* <p>There is also an optional {@code Parser} (and associated {@code Coder}) that can be
* specified that can be used to parse the InputStream into objects usable with Beam. By default,
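Building on the read example above, withQueryFilter can be sketched like this; the filter string and the exact signature are assumptions:

    // Sketch only: restrict the GridFS read to files matching a JSON filter.
    pipeline.apply(MongoDbGridFSIO.<String>read()
        .withUri("mongodb://localhost:27017")
        .withDatabase("my-database")
        .withBucket("my-bucket")
        .withQueryFilter("{\"filename\": {\"$regex\": \"2016-.*\"}}"));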
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java
index d5659e9..20b9265 100644
--- a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java
+++ b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java
@@ -51,11 +51,11 @@ import org.slf4j.LoggerFactory;
* <h3>Reading from MongoDB</h3>
*
* <p>MongoDbIO source returns a bounded collection of String as {@code PCollection<String>}.
- * The String is the JSON form of the MongoDB Document.</p>
+ * The String is the JSON form of the MongoDB Document.
*
* <p>To configure the MongoDB source, you have to provide the connection URI, the database name
* and the collection name. The following example illustrates various options for configuring the
- * source:</p>
+ * source:
*
* <pre>{@code
*
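The configuration example is cut off by the hunk. A sketch of what the text describes (connection URI, database name, collection name); the builder names are assumptions:

    // Sketch only.
    pipeline.apply(MongoDbIO.read()
        .withUri("mongodb://localhost:27017")
        .withDatabase("my-database")
        .withCollection("my-collection"));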
@@ -282,9 +282,8 @@ public class MongoDbIO {
* <li>_id: 109</li>
* <li>_id: 256</li>
* </ul>
- * </p>
*
- * This method will generate a list of range filters performing the following splits:
+ * <p>This method will generate a list of range filters performing the following splits:
* <ul>
* <li>from the beginning of the collection up to _id 56, so basically data with
* _id lower than 56</li>
[2/3] incubator-beam git commit: checkstyle: improve Javadoc checking
Posted by dh...@apache.org.
checkstyle: improve Javadoc checking
* Enable JavadocParagraph, which gives a consistent style in how we format paragraphs.
This caught a LOT of missing tags and other issues.
* This also caught a lot of HTML issues that led to mis-rendering Javadoc.
Project: http://git-wip-us.apache.org/repos/asf/incubator-beam/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-beam/commit/9e30a989
Tree: http://git-wip-us.apache.org/repos/asf/incubator-beam/tree/9e30a989
Diff: http://git-wip-us.apache.org/repos/asf/incubator-beam/diff/9e30a989
Branch: refs/heads/master
Commit: 9e30a989dddb95854e0c403d58c1b5fb137ee720
Parents: 7c2124b
Author: Dan Halperin <dh...@google.com>
Authored: Wed Oct 5 20:24:00 2016 -0700
Committer: Dan Halperin <dh...@google.com>
Committed: Tue Oct 11 16:08:15 2016 -0700
----------------------------------------------------------------------
.../beam/examples/cookbook/TriggerExample.java | 28 ++++++++++----------
.../beam/examples/complete/game/GameStats.java | 8 +++---
.../examples/complete/game/HourlyTeamScore.java | 6 ++---
.../examples/complete/game/LeaderBoard.java | 10 +++----
.../beam/examples/complete/game/UserScore.java | 8 +++---
.../complete/game/injector/Injector.java | 10 +++----
.../apache/beam/runners/core/DoFnRunner.java | 2 +-
.../runners/direct/DirectExecutionContext.java | 2 +-
.../beam/runners/direct/DirectRunner.java | 13 +++++----
.../direct/ExecutorServiceParallelExecutor.java | 2 +-
.../beam/runners/direct/TransformEvaluator.java | 2 +-
.../beam/runners/direct/TransformResult.java | 4 +--
.../runners/direct/ViewEvaluatorFactory.java | 2 +-
.../beam/runners/direct/WatermarkManager.java | 4 +--
.../beam/runners/flink/examples/TFIDF.java | 12 ++++-----
.../beam/runners/flink/examples/WordCount.java | 4 +--
.../flink/examples/streaming/AutoComplete.java | 4 +--
.../runners/flink/FlinkPipelineOptions.java | 8 +++---
.../apache/beam/runners/flink/FlinkRunner.java | 1 -
.../runners/flink/FlinkRunnerRegistrar.java | 4 +--
.../functions/FlinkMultiOutputDoFnFunction.java | 2 +-
.../beam/runners/flink/FlinkTestPipeline.java | 6 ++---
.../beam/runners/dataflow/DataflowRunner.java | 9 ++++---
.../dataflow/internal/AssignWindows.java | 6 ++---
.../runners/dataflow/internal/IsmFormat.java | 8 +++---
.../options/DataflowPipelineDebugOptions.java | 3 +--
.../runners/dataflow/util/RandomAccessData.java | 8 +++---
.../apache/beam/runners/spark/SparkRunner.java | 2 +-
.../beam/runners/spark/TestSparkRunner.java | 4 +--
.../runners/spark/coders/WritableCoder.java | 2 +-
.../spark/io/hadoop/ShardNameTemplateAware.java | 2 +-
.../translation/GroupCombineFunctions.java | 4 +--
.../spark/translation/TranslationUtils.java | 2 +-
.../RecoverFromCheckpointStreamingTest.java | 4 +--
.../src/main/resources/beam/checkstyle.xml | 3 +++
.../java/org/apache/beam/sdk/io/AvroSource.java | 1 +
.../org/apache/beam/sdk/io/BoundedSource.java | 19 ++++++++-----
.../org/apache/beam/sdk/io/FileBasedSink.java | 1 +
.../apache/beam/sdk/io/OffsetBasedSource.java | 2 ++
.../java/org/apache/beam/sdk/io/PubsubIO.java | 1 +
.../main/java/org/apache/beam/sdk/io/Sink.java | 3 +++
.../java/org/apache/beam/sdk/io/TextIO.java | 11 ++++----
.../main/java/org/apache/beam/sdk/io/Write.java | 2 +-
.../apache/beam/sdk/io/range/RangeTracker.java | 1 +
.../beam/sdk/options/PipelineOptions.java | 8 +++---
.../sdk/options/ProxyInvocationHandler.java | 2 +-
.../beam/sdk/testing/SerializableMatchers.java | 4 +--
.../beam/sdk/testing/SourceTestUtils.java | 3 ++-
.../sdk/transforms/ApproximateQuantiles.java | 5 ++--
.../org/apache/beam/sdk/transforms/Combine.java | 6 ++---
.../apache/beam/sdk/transforms/CombineFns.java | 8 +++---
.../beam/sdk/transforms/CombineWithContext.java | 4 +--
.../apache/beam/sdk/transforms/GroupByKey.java | 4 +--
.../org/apache/beam/sdk/transforms/Latest.java | 12 ++++-----
.../org/apache/beam/sdk/transforms/OldDoFn.java | 2 +-
.../apache/beam/sdk/transforms/PTransform.java | 8 +++---
.../org/apache/beam/sdk/transforms/ParDo.java | 14 +++++-----
.../beam/sdk/transforms/RemoveDuplicates.java | 5 ++--
.../org/apache/beam/sdk/transforms/ViewFn.java | 2 +-
.../apache/beam/sdk/transforms/WithKeys.java | 2 +-
.../sdk/transforms/display/DisplayData.java | 8 +++---
.../sdk/transforms/reflect/DoFnInvoker.java | 2 +-
.../transforms/windowing/AfterWatermark.java | 4 +--
.../beam/sdk/transforms/windowing/Never.java | 3 +--
.../beam/sdk/transforms/windowing/PaneInfo.java | 4 +--
.../transforms/windowing/SlidingWindows.java | 3 +--
.../beam/sdk/transforms/windowing/Window.java | 18 ++++++-------
.../beam/sdk/util/BaseExecutionContext.java | 4 +--
.../sdk/util/ExposedByteArrayOutputStream.java | 1 +
.../apache/beam/sdk/util/GatherAllPanes.java | 10 +++----
.../beam/sdk/util/PerKeyCombineFnRunners.java | 4 +--
.../org/apache/beam/sdk/util/PubsubClient.java | 3 +++
.../apache/beam/sdk/util/PubsubTestClient.java | 2 +-
.../apache/beam/sdk/util/TimerInternals.java | 2 +-
.../java/org/apache/beam/sdk/values/PInput.java | 2 +-
.../apache/beam/sdk/values/TypeDescriptors.java | 16 +++++------
.../beam/sdk/testing/SystemNanoTimeSleeper.java | 2 +-
.../beam/sdk/transforms/DoFnTesterTest.java | 2 +-
.../sdk/io/gcp/bigquery/BigQueryAvroUtils.java | 2 +-
.../beam/sdk/io/gcp/bigquery/BigQueryIO.java | 5 ++++
.../sdk/io/gcp/bigquery/BigQueryServices.java | 2 +-
.../beam/sdk/io/gcp/datastore/DatastoreIO.java | 2 +-
.../beam/sdk/io/gcp/datastore/DatastoreV1.java | 2 +-
.../sdk/io/gcp/datastore/SplitQueryFnIT.java | 4 +--
.../beam/sdk/io/gcp/datastore/V1ReadIT.java | 2 +-
.../beam/sdk/io/gcp/datastore/V1WriteIT.java | 2 +-
.../apache/beam/sdk/io/hdfs/HDFSFileSource.java | 3 ++-
.../apache/beam/sdk/io/hdfs/WritableCoder.java | 2 +-
.../SimpleAuthAvroHDFSFileSource.java | 2 +-
.../hdfs/simpleauth/SimpleAuthHDFSFileSink.java | 2 +-
.../simpleauth/SimpleAuthHDFSFileSource.java | 2 +-
.../java/org/apache/beam/sdk/io/jms/JmsIO.java | 8 +++---
.../org/apache/beam/sdk/io/kafka/KafkaIO.java | 16 +++++------
.../beam/sdk/io/kinesis/CustomOptional.java | 2 +-
.../sdk/io/kinesis/GetKinesisRecordsResult.java | 2 +-
.../sdk/io/kinesis/KinesisClientProvider.java | 2 +-
.../apache/beam/sdk/io/kinesis/KinesisIO.java | 14 +++++-----
.../beam/sdk/io/kinesis/KinesisReader.java | 10 +++----
.../sdk/io/kinesis/KinesisReaderCheckpoint.java | 4 +--
.../beam/sdk/io/kinesis/KinesisRecordCoder.java | 2 +-
.../beam/sdk/io/kinesis/KinesisSource.java | 6 ++---
.../beam/sdk/io/kinesis/RecordFilter.java | 6 ++---
.../apache/beam/sdk/io/kinesis/RoundRobin.java | 2 +-
.../beam/sdk/io/kinesis/ShardCheckpoint.java | 6 ++---
.../sdk/io/kinesis/ShardRecordsIterator.java | 4 +--
.../sdk/io/kinesis/SimplifiedKinesisClient.java | 8 +++---
.../beam/sdk/io/kinesis/StartingPoint.java | 2 +-
.../beam/sdk/io/kinesis/KinesisTestOptions.java | 2 +-
.../beam/sdk/io/kinesis/KinesisUploader.java | 2 +-
.../beam/sdk/io/mongodb/MongoDbGridFSIO.java | 6 ++---
.../apache/beam/sdk/io/mongodb/MongoDbIO.java | 7 +++--
111 files changed, 293 insertions(+), 269 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
----------------------------------------------------------------------
diff --git a/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java b/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
index 68d4d32..d965d4a 100644
--- a/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
+++ b/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
@@ -61,11 +61,11 @@ import org.joda.time.Instant;
* {@link org.apache.beam.sdk.transforms.windowing.Trigger triggers} to control when the results for
* each window are emitted.
*
- * <p> This example uses a portion of real traffic data from San Diego freeways. It contains
+ * <p>This example uses a portion of real traffic data from San Diego freeways. It contains
* readings from sensor stations set up along each freeway. Each sensor reading includes a
* calculation of the 'total flow' across all lanes in that freeway direction.
*
- * <p> Concepts:
+ * <p>Concepts:
* <pre>
* 1. The default triggering behavior
* 2. Late data with the default trigger
@@ -73,28 +73,28 @@ import org.joda.time.Instant;
* 4. Combining late data and speculative estimates
* </pre>
*
- * <p> Before running this example, it will be useful to familiarize yourself with Beam triggers
+ * <p>Before running this example, it will be useful to familiarize yourself with Beam triggers
* and understand the concept of 'late data'.
* See: <a href="http://beam.incubator.apache.org/use/walkthroughs/">
* http://beam.incubator.apache.org/use/walkthroughs/</a>
*
- * <p> The example is configured to use the default BigQuery table from the example common package
+ * <p>The example is configured to use the default BigQuery table from the example common package
* (there are no defaults for a general Beam pipeline).
* You can override them by using the {@code --bigQueryDataset} and {@code --bigQueryTable}
* options. If the BigQuery tables do not exist, the example will try to create them.
*
- * <p> The pipeline outputs its results to a BigQuery table.
+ * <p>The pipeline outputs its results to a BigQuery table.
* Here are some queries you can use to see interesting results:
* Replace {@code <enter_table_name>} in the query below with the name of the BigQuery table.
* Replace {@code <enter_window_interval>} in the query below with the window interval.
*
- * <p> To see the results of the default trigger,
+ * <p>To see the results of the default trigger,
* Note: When you start up your pipeline, you'll initially see results from 'late' data. Wait after
* the window duration, until the first pane of non-late data has been emitted, to see more
* interesting results.
* {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "default" ORDER BY window DESC}
*
- * <p> To see the late data i.e. dropped by the default trigger,
+ * <p>To see the late data, i.e. data dropped by the default trigger,
* {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "withAllowedLateness" and
* (timing = "LATE" or timing = "ON_TIME") and freeway = "5" ORDER BY window DESC, processing_time}
*
@@ -103,23 +103,23 @@ import org.joda.time.Instant;
* (trigger_type = "withAllowedLateness" or trigger_type = "sequential") and freeway = "5" ORDER BY
* window DESC, processing_time}
*
- * <p> To see speculative results every minute,
+ * <p>To see speculative results every minute,
* {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "speculative" and freeway = "5"
* ORDER BY window DESC, processing_time}
*
- * <p> To see speculative results every five minutes after the end of the window
+ * <p>To see speculative results every five minutes after the end of the window
* {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "sequential" and timing != "EARLY"
* and freeway = "5" ORDER BY window DESC, processing_time}
*
- * <p> To see the first and the last pane for a freeway in a window for all the trigger types,
+ * <p>To see the first and the last pane for a freeway in a window for all the trigger types,
* {@code SELECT * FROM <enter_table_name> WHERE (isFirst = true or isLast = true) ORDER BY window}
*
- * <p> To reduce the number of results for each query we can add additional where clauses.
+ * <p>To reduce the number of results for each query we can add additional where clauses.
* For example, to see the results of the default trigger,
* {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "default" AND freeway = "5" AND
* window = "<enter_window_interval>"}
*
- * <p> The example will try to cancel the pipelines on the signal to terminate the process (CTRL-C)
+ * <p>The example will try to cancel the pipelines on the signal to terminate the process (CTRL-C)
* and then exit.
*/
@@ -153,13 +153,13 @@ public class TriggerExample {
* 5 | 60 | 10:27:20 | 10:27:25
* 5 | 60 | 10:29:00 | 11:11:00
*
- * <p> Beam tracks a watermark which records up to what point in event time the data is
+ * <p>Beam tracks a watermark which records up to what point in event time the data is
* complete. For the purposes of the example, we'll assume the watermark is approximately 15m
* behind the current processing time. In practice, the actual value would vary over time based
* on the system's knowledge of the current delay and contents of the backlog (data
* that has not yet been processed).
*
- * <p> If the watermark is 15m behind, then the window [10:00:00, 10:30:00) (in event time) would
+ * <p>If the watermark is 15m behind, then the window [10:00:00, 10:30:00) (in event time) would
* close at 10:44:59, when the watermark passes 10:30:00.
*/
static class CalculateTotalFlow
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
index f9957eb..e39a9ff 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
@@ -62,21 +62,21 @@ import org.slf4j.LoggerFactory;
* New concepts: session windows and finding session duration; use of both
* singleton and non-singleton side inputs.
*
- * <p> This pipeline builds on the {@link LeaderBoard} functionality, and adds some "business
+ * <p>This pipeline builds on the {@link LeaderBoard} functionality, and adds some "business
* intelligence" analysis: abuse detection and usage patterns. The pipeline derives the Mean user
* score sum for a window, and uses that information to identify likely spammers/robots. (The robots
* have a higher click rate than the human users). The 'robot' users are then filtered out when
* calculating the team scores.
*
- * <p> Additionally, user sessions are tracked: that is, we find bursts of user activity using
+ * <p>Additionally, user sessions are tracked: that is, we find bursts of user activity using
* session windows. Then, the mean session duration information is recorded in the context of
* subsequent fixed windowing. (This could be used to tell us what games are giving us greater
* user retention).
*
- * <p> Run {@code org.apache.beam.examples.complete.game.injector.Injector} to generate
+ * <p>Run {@code org.apache.beam.examples.complete.game.injector.Injector} to generate
* pubsub data for this pipeline. The {@code Injector} documentation provides more detail.
*
- * <p> To execute this pipeline using the Dataflow service, specify the pipeline configuration
+ * <p>To execute this pipeline using the Dataflow service, specify the pipeline configuration
* like this:
* <pre>{@code
* --project=YOUR_PROJECT_ID
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
index cf13899..1231c91 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
@@ -44,7 +44,7 @@ import org.joda.time.format.DateTimeFormatter;
* domain, following {@link UserScore}. In addition to the concepts introduced in {@link UserScore},
* new concepts include: windowing and element timestamps; use of {@code Filter.by()}.
*
- * <p> This pipeline processes data collected from gaming events in batch, building on {@link
+ * <p>This pipeline processes data collected from gaming events in batch, building on {@link
* UserScore} but using fixed windows. It calculates the sum of scores per team, for each window,
* optionally allowing specification of two timestamps before and after which data is filtered out.
* This allows a model where late data collected after the intended analysis window can be included,
@@ -53,7 +53,7 @@ import org.joda.time.format.DateTimeFormatter;
* {@link UserScore} pipeline. However, our batch processing is high-latency, in that we don't get
* results from plays at the beginning of the batch's time period until the batch is processed.
*
- * <p> To execute this pipeline using the Dataflow service, specify the pipeline configuration
+ * <p>To execute this pipeline using the Dataflow service, specify the pipeline configuration
* like this:
* <pre>{@code
* --project=YOUR_PROJECT_ID
@@ -64,7 +64,7 @@ import org.joda.time.format.DateTimeFormatter;
* </pre>
* where the BigQuery dataset you specify must already exist.
*
- * <p> Optionally include {@code --input} to specify the batch input file path.
+ * <p>Optionally include {@code --input} to specify the batch input file path.
* To indicate a time after which the data should be filtered out, include the
* {@code --stopMin} arg. E.g., {@code --stopMin=2015-10-18-23-59} indicates that any data
* timestamped after 23:59 PST on 2015-10-18 should not be included in the analysis.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
index 13bbf44..18a5aa1 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
@@ -57,26 +57,26 @@ import org.joda.time.format.DateTimeFormatter;
* early/speculative results; using .accumulatingFiredPanes() to do cumulative processing of late-
* arriving data.
*
- * <p> This pipeline processes an unbounded stream of 'game events'. The calculation of the team
+ * <p>This pipeline processes an unbounded stream of 'game events'. The calculation of the team
* scores uses fixed windowing based on event time (the time of the game play event), not
* processing time (the time that an event is processed by the pipeline). The pipeline calculates
* the sum of scores per team, for each window. By default, the team scores are calculated using
* one-hour windows.
*
- * <p> In contrast-- to demo another windowing option-- the user scores are calculated using a
+ * <p>In contrast-- to demo another windowing option-- the user scores are calculated using a
* global window, which periodically (every ten minutes) emits cumulative user score sums.
*
- * <p> In contrast to the previous pipelines in the series, which used static, finite input data,
+ * <p>In contrast to the previous pipelines in the series, which used static, finite input data,
* here we're using an unbounded data source, which lets us provide speculative results, and allows
* handling of late data, at much lower latency. We can use the early/speculative results to keep a
* 'leaderboard' updated in near-realtime. Our handling of late data lets us generate correct
* results, e.g. for 'team prizes'. We're now outputting window results as they're
* calculated, giving us much lower latency than with the previous batch examples.
*
- * <p> Run {@link injector.Injector} to generate pubsub data for this pipeline. The Injector
+ * <p>Run {@link injector.Injector} to generate pubsub data for this pipeline. The Injector
* documentation provides more detail on how to do this.
*
- * <p> To execute this pipeline using the Dataflow service, specify the pipeline configuration
+ * <p>To execute this pipeline using the Dataflow service, specify the pipeline configuration
* like this:
* <pre>{@code
* --project=YOUR_PROJECT_ID
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
index f05879f..fc4e7f3 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
@@ -48,15 +48,15 @@ import org.slf4j.LoggerFactory;
* BigQuery; using standalone DoFns; use of the sum by key transform; examples of
* Java 8 lambda syntax.
*
- * <p> In this gaming scenario, many users play, as members of different teams, over the course of a
+ * <p>In this gaming scenario, many users play, as members of different teams, over the course of a
* day, and their actions are logged for processing. Some of the logged game events may be late-
* arriving, if users play on mobile devices and go transiently offline for a period.
*
- * <p> This pipeline does batch processing of data collected from gaming events. It calculates the
+ * <p>This pipeline does batch processing of data collected from gaming events. It calculates the
* sum of scores per user, over an entire batch of gaming data (collected, say, for each day). The
* batch processing will not include any late data that arrives after the day's cutoff point.
*
- * <p> To execute this pipeline using the Dataflow service and static example input data, specify
+ * <p>To execute this pipeline using the Dataflow service and static example input data, specify
* the pipeline configuration like this:
* <pre>{@code
* --project=YOUR_PROJECT_ID
@@ -67,7 +67,7 @@ import org.slf4j.LoggerFactory;
* </pre>
* where the BigQuery dataset you specify must already exist.
*
- * <p> Optionally include the --input argument to specify a batch input file.
+ * <p>Optionally include the --input argument to specify a batch input file.
* See the --input default value for example batch data file, or use {@link injector.Injector} to
* generate your own batch data.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
index 8f8bd9f..8c23cd7 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
@@ -40,22 +40,22 @@ import org.joda.time.format.DateTimeFormatter;
* This is a generator that simulates usage data from a mobile game, and either publishes the data
* to a pubsub topic or writes it to a file.
*
- * <p> The general model used by the generator is the following. There is a set of teams with team
+ * <p>The general model used by the generator is the following. There is a set of teams with team
* members. Each member is scoring points for their team. After some period, a team will dissolve
* and a new one will be created in its place. There is also a set of 'Robots', or spammer users.
* They hop from team to team. The robots are set to have a higher 'click rate' (generate more
* events) than the regular team members.
*
- * <p> Each generated line of data has the following form:
+ * <p>Each generated line of data has the following form:
* username,teamname,score,timestamp_in_ms,readable_time
* e.g.:
* user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
*
- * <p> The Injector writes either to a PubSub topic, or a file. It will use the PubSub topic if
+ * <p>The Injector writes either to a PubSub topic or a file. It will use the PubSub topic if
* specified. It takes the following arguments:
* {@code Injector project-name (topic-name|none) (filename|none)}.
*
- * <p> To run the Injector in the mode where it publishes to PubSub, you will need to authenticate
+ * <p>To run the Injector in the mode where it publishes to PubSub, you will need to authenticate
* locally using project-based service account credentials to avoid running over PubSub
* quota.
* See https://developers.google.com/identity/protocols/application-default-credentials
@@ -74,7 +74,7 @@ import org.joda.time.format.DateTimeFormatter;
* </pre>
* The pubsub topic will be created if it does not exist.
*
- * <p> To run the injector in write-to-file-mode, set the topic name to "none" and specify the
+ * <p>To run the injector in write-to-file mode, set the topic name to "none" and specify the
* filename:
* <pre>{@code
* Injector <project-name> none <filename>
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
----------------------------------------------------------------------
diff --git a/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java b/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
index f4c8eea..ac64969 100644
--- a/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
+++ b/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
@@ -52,7 +52,7 @@ public interface DoFnRunner<InputT, OutputT> {
/**
* Gets this object as a {@link OldDoFn}.
*
- * Most implementors of this interface are expected to be {@link OldDoFn} instances, and will
+ * <p>Most implementors of this interface are expected to be {@link OldDoFn} instances, and will
* return themselves.
*/
OldDoFn<KeyedWorkItem<K, InputT>, KV<K, OutputT>> asDoFn();
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
index 2d2b87d..8cec8f7 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
@@ -28,7 +28,7 @@ import org.apache.beam.sdk.util.state.CopyOnAccessInMemoryStateInternals;
/**
* Execution Context for the {@link DirectRunner}.
*
- * This implementation is not thread safe. A new {@link DirectExecutionContext} must be created
+ * <p>This implementation is not thread safe. A new {@link DirectExecutionContext} must be created
* for each thread that requires it.
*/
class DirectExecutionContext
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
index abcc57b..224101a 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
@@ -158,11 +158,10 @@ public class DirectRunner
* Return a new {@link CommittedBundle} that is like this one, except calls to
* {@link #getElements()} will return the provided elements. This bundle is unchanged.
*
- * <p>
- * The value of the {@link #getSynchronizedProcessingOutputWatermark() synchronized processing
- * output watermark} of the returned {@link CommittedBundle} is equal to the value returned from
- * the current bundle. This is used to ensure a {@link PTransform} that could not complete
- * processing on input elements properly holds the synchronized processing time to the
+ * <p>The value of the {@link #getSynchronizedProcessingOutputWatermark() synchronized
+ * processing output watermark} of the returned {@link CommittedBundle} is equal to the value
+ * returned from the current bundle. This is used to ensure a {@link PTransform} that could not
+ * complete processing on input elements properly holds the synchronized processing time to the
* appropriate value.
*/
CommittedBundle<T> withElements(Iterable<WindowedValue<T>> elements);
@@ -322,7 +321,7 @@ public class DirectRunner
/**
* The result of running a {@link Pipeline} with the {@link DirectRunner}.
*
- * Throws {@link UnsupportedOperationException} for all methods.
+ * <p>Throws {@link UnsupportedOperationException} for all methods.
*/
public static class DirectPipelineResult implements PipelineResult {
private final PipelineExecutor executor;
@@ -389,7 +388,7 @@ public class DirectRunner
* {@link DirectOptions#isShutdownUnboundedProducersWithMaxWatermark()} set to false,
* this method will never return.
*
- * See also {@link PipelineExecutor#awaitCompletion()}.
+ * <p>See also {@link PipelineExecutor#awaitCompletion()}.
*/
public State awaitCompletion() throws Throwable {
if (!state.isTerminal()) {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
index 52c45c3..567def2 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
@@ -305,7 +305,7 @@ final class ExecutorServiceParallelExecutor implements PipelineExecutor {
/**
* An internal status update on the state of the executor.
*
- * Used to signal when the executor should be shut down (due to an exception).
+ * <p>Used to signal when the executor should be shut down (due to an exception).
*/
@AutoValue
abstract static class ExecutorUpdate {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
index 6c8e48b..1624fcb 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
@@ -37,7 +37,7 @@ public interface TransformEvaluator<InputT> {
/**
* Finish processing the bundle of this {@link TransformEvaluator}.
*
- * After {@link #finishBundle()} is called, the {@link TransformEvaluator} will not be reused,
+ * <p>After {@link #finishBundle()} is called, the {@link TransformEvaluator} will not be reused,
* and no more elements will be processed.
*
* @return an {@link TransformResult} containing the results of this bundle evaluation.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
index 0b08294..ba2d48e 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
@@ -59,7 +59,7 @@ public interface TransformResult {
/**
* Returns the Watermark Hold for the transform at the time this result was produced.
*
- * If the transform does not set any watermark hold, returns
+ * <p>If the transform does not set any watermark hold, returns
* {@link BoundedWindow#TIMESTAMP_MAX_VALUE}.
*/
Instant getWatermarkHold();
@@ -67,7 +67,7 @@ public interface TransformResult {
/**
* Returns the State used by the transform.
*
- * If this evaluation did not access state, this may return null.
+ * <p>If this evaluation did not access state, this may return null.
*/
@Nullable
CopyOnAccessInMemoryStateInternals<?> getState();
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
index a4e8d6f..43a1225 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
@@ -139,7 +139,7 @@ class ViewEvaluatorFactory implements TransformEvaluatorFactory {
/**
* An in-process implementation of the {@link CreatePCollectionView} primitive.
*
- * This implementation requires the input {@link PCollection} to be an iterable
+ * <p>This implementation requires the input {@link PCollection} to be an iterable
* of {@code WindowedValue<ElemT>}, which is provided
* to {@link PCollectionView#getViewFn()} for conversion to {@link ViewT}.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
index 4a3108c..82a6e4f 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
@@ -168,7 +168,7 @@ public class WatermarkManager {
/**
* Returns the {@link WatermarkUpdate} that is a result of combining the two watermark updates.
*
- * If either of the input {@link WatermarkUpdate WatermarkUpdates} were advanced, the result
+ * <p>If either of the input {@link WatermarkUpdate WatermarkUpdates} were advanced, the result
* {@link WatermarkUpdate} has been advanced.
*/
public WatermarkUpdate union(WatermarkUpdate that) {
@@ -634,7 +634,7 @@ public class WatermarkManager {
 * latestTime argument and put them in the result with the same key, then remove all of the keys
* which have no more pending timers.
*
- * The result collection retains ordering of timers (from earliest to latest).
+ * <p>The result collection retains ordering of timers (from earliest to latest).
*/
private static Map<StructuralKey<?>, List<TimerData>> extractFiredTimers(
Instant latestTime, Map<StructuralKey<?>, NavigableSet<TimerData>> objectTimers) {
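A minimal sketch of the extraction this documents, assuming plain String keys and java.time.Instant timers as stand-ins for the runner's StructuralKey and TimerData (both substitutions are illustrative, not Beam's types):

import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.SortedSet;

class FiredTimerSketch {
  static Map<String, List<Instant>> extractFiredTimers(
      Instant latestTime, Map<String, NavigableSet<Instant>> pendingTimers) {
    Map<String, List<Instant>> fired = new HashMap<>();
    Iterator<Map.Entry<String, NavigableSet<Instant>>> it =
        pendingTimers.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, NavigableSet<Instant>> entry = it.next();
      NavigableSet<Instant> timers = entry.getValue();
      // headSet iterates earliest-to-latest, preserving the documented ordering.
      SortedSet<Instant> earlier = timers.headSet(latestTime);
      if (!earlier.isEmpty()) {
        fired.put(entry.getKey(), new ArrayList<>(earlier));
        earlier.clear(); // the view writes through, removing fired timers from the pending set
      }
      if (timers.isEmpty()) {
        it.remove(); // drop keys that have no more pending timers
      }
    }
    return fired;
  }
}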
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
----------------------------------------------------------------------
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
index a92d339..6d04e0b 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
@@ -65,9 +65,9 @@ import org.slf4j.LoggerFactory;
/**
* An example that computes a basic TF-IDF search table for a directory or GCS prefix.
*
- * <p> Concepts: joining data; side inputs; logging
+ * <p>Concepts: joining data; side inputs; logging
*
- * <p> To execute this pipeline locally, specify general pipeline configuration:
+ * <p>To execute this pipeline locally, specify general pipeline configuration:
* <pre>{@code
* --project=YOUR_PROJECT_ID
* }</pre>
@@ -76,7 +76,7 @@ import org.slf4j.LoggerFactory;
* --output=[YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
* }</pre>
*
- * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
+ * <p>To execute this pipeline using the Dataflow service, specify pipeline configuration:
* <pre>{@code
* --project=YOUR_PROJECT_ID
* --stagingLocation=gs://YOUR_STAGING_DIRECTORY
@@ -85,14 +85,14 @@ import org.slf4j.LoggerFactory;
* --output=gs://YOUR_OUTPUT_PREFIX
* }</pre>
*
- * <p> The default input is {@code gs://dataflow-samples/shakespeare/} and can be overridden with
+ * <p>The default input is {@code gs://dataflow-samples/shakespeare/} and can be overridden with
* {@code --input}.
*/
public class TFIDF {
/**
* Options supported by {@link TFIDF}.
- * <p>
- * Inherits standard configuration options.
+ *
+ * <p>Inherits standard configuration options.
*/
private interface Options extends PipelineOptions, FlinkPipelineOptions {
@Description("Path to the directory or GCS prefix containing files to read from")
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
----------------------------------------------------------------------
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
index 9cce757..c816442 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
@@ -96,8 +96,8 @@ public class WordCount {
/**
* Options supported by {@link WordCount}.
- * <p>
- * Inherits standard configuration options.
+ *
+ * <p>Inherits standard configuration options.
*/
public interface Options extends PipelineOptions, FlinkPipelineOptions {
@Description("Path of the file to read from")
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
----------------------------------------------------------------------
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
index 4636e3f..97ba232 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
@@ -155,7 +155,7 @@ public class AutoComplete {
/**
* Cheaper but higher latency.
*
- * <p> Returns two PCollections, the first is top prefixes of size greater
+ * <p>Returns two PCollections: the first is top prefixes of size greater
* than minPrefix, and the second is top prefixes of size exactly
* minPrefix.
*/
@@ -362,7 +362,7 @@ public class AutoComplete {
/**
* Options supported by this class.
*
- * <p> Inherits standard Dataflow configuration options.
+ * <p>Inherits standard Dataflow configuration options.
*/
private interface Options extends WindowedWordCount.StreamingWordCountOptions {
@Description("Whether to use the recursive algorithm")
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
index a067e76..be99f29 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
@@ -35,10 +35,10 @@ public interface FlinkPipelineOptions
/**
* List of local files to make available to workers.
- * <p>
- * Jars are placed on the worker's classpath.
- * <p>
- * The default value is the list of jars from the main program's classpath.
+ *
+ * <p>Jars are placed on the worker's classpath.
+ *
+ * <p>The default value is the list of jars from the main program's classpath.
*/
@Description("Jar-Files to send to all workers and put on the classpath. "
+ "The default value is all files from the classpath.")
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
index 137fdeb..932952d 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
@@ -65,7 +65,6 @@ import org.slf4j.LoggerFactory;
* A {@link PipelineRunner} that executes the operations in the
* pipeline by first translating them to a Flink Plan and then executing them either locally
* or on a Flink cluster, depending on the configuration.
- * <p>
*/
public class FlinkRunner extends PipelineRunner<FlinkRunnerResult> {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
index 0e4b513..681459a 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
@@ -27,10 +27,10 @@ import org.apache.beam.sdk.runners.PipelineRunnerRegistrar;
/**
- * AuteService registrar - will register FlinkRunner and FlinkOptions
+ * AutoService registrar - will register FlinkRunner and FlinkOptions
* as possible pipeline runner services.
*
- * It ends up in META-INF/services and gets picked up by Dataflow.
+ * <p>It ends up in META-INF/services and gets picked up by Beam.
*
*/
public class FlinkRunnerRegistrar {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
index 9cc84ca..810609e 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
@@ -34,7 +34,7 @@ import org.apache.flink.util.Collector;
* Encapsulates a {@link OldDoFn} that can emit to multiple
* outputs inside a Flink {@link org.apache.flink.api.common.functions.RichMapPartitionFunction}.
*
- * We get a mapping from {@link org.apache.beam.sdk.values.TupleTag} to output index
+ * <p>We get a mapping from {@link org.apache.beam.sdk.values.TupleTag} to output index
* and must tag all outputs with the output number. Afterwards a filter will filter out
* those elements that are not to be in a specific output.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java b/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
index 9f7bc00..d6240c4 100644
--- a/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
+++ b/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
@@ -31,7 +31,7 @@ public class FlinkTestPipeline extends Pipeline {
/**
* Creates and returns a new test pipeline for batch execution.
*
- * <p> Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
+ * <p>Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
* {@link Pipeline#run} to execute the pipeline and check the tests.
*/
public static FlinkTestPipeline createForBatch() {
@@ -41,7 +41,7 @@ public class FlinkTestPipeline extends Pipeline {
/**
* Creates and returns a new test pipeline for streaming execution.
*
- * <p> Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
+ * <p>Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
* {@link Pipeline#run} to execute the pipeline and check the tests.
*
* @return The Test Pipeline
@@ -53,7 +53,7 @@ public class FlinkTestPipeline extends Pipeline {
/**
* Creates and returns a new test pipeline for streaming or batch execution.
*
- * <p> Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
+ * <p>Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
* {@link Pipeline#run} to execute the pipeline and check the tests.
*
* @param streaming <code>True</code> for streaming mode, <code>False</code> for batch.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
index 64ac3ad..646a145 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
@@ -174,11 +174,12 @@ import org.slf4j.LoggerFactory;
* to the Dataflow representation using the {@link DataflowPipelineTranslator} and then submitting
* them to a Dataflow service for execution.
*
- * <p><h3>Permissions</h3>
+ * <h3>Permissions</h3>
*
- * When reading from a Dataflow source or writing to a Dataflow sink using {@code DataflowRunner},
- * the Google cloudservices account and the Google compute engine service account of the GCP project
- * running the Dataflow Job will need access to the corresponding source/sink.
+ * <p>When reading from a Dataflow source or writing to a Dataflow sink using
+ * {@code DataflowRunner}, the Google Cloud services account and the Google Compute Engine service
+ * account of the GCP project running the Dataflow job will need access to the corresponding
+ * source/sink.
*
* <p>Please see <a href="https://cloud.google.com/dataflow/security-and-permissions">Google Cloud
* Dataflow Security and Permissions</a> for more details.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
index d4f9a90..62d4aff 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
@@ -30,14 +30,14 @@ import org.apache.beam.sdk.values.PCollection;
* A primitive {@link PTransform} that implements the {@link Window#into(WindowFn)}
* {@link PTransform}.
*
- * For an application of {@link Window#into(WindowFn)} that changes the {@link WindowFn}, applies
+ * <p>For an application of {@link Window#into(WindowFn)} that changes the {@link WindowFn}, applies
* a primitive {@link PTransform} in the Dataflow service.
*
- * For an application of {@link Window#into(WindowFn)} that does not change the {@link WindowFn},
+ * <p>For an application of {@link Window#into(WindowFn)} that does not change the {@link WindowFn},
* applies an identity {@link ParDo} and sets the windowing strategy of the output
* {@link PCollection}.
*
- * For internal use only.
+ * <p>For internal use only.
*
* @param <T> the type of input element
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
index bb8daf3..903e7b4 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
@@ -294,7 +294,7 @@ public class IsmFormat {
/**
* Computes the shard id for the given key component(s).
*
- * The shard keys are encoded into their byte representations and hashed using the
+ * <p>The shard keys are encoded into their byte representations and hashed using the
* <a href="http://smhasher.googlecode.com/svn/trunk/MurmurHash3.cpp">
* 32-bit murmur3 algorithm, x86 variant</a> (little-endian variant),
* using {@code 1225801234} as the seed value. We ensure that shard ids for
@@ -307,7 +307,7 @@ public class IsmFormat {
/**
* Computes the shard id for the given key component(s).
*
- * Mutates {@code keyBytes} such that when returned, contains the encoded
+ * <p>Mutates {@code keyBytes} such that, when returned, it contains the encoded
* version of the key components.
*/
public <V, T> int encodeAndHash(List<?> keyComponents, RandomAccessData keyBytesToMutate) {
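As a sketch of the hash step described above (not IsmFormat's actual code): Guava's Hashing.murmur3_32 implements the 32-bit x86 murmur3 variant named in the Javadoc; the numShards parameter and the floor-mod reduction are illustrative assumptions:

import com.google.common.hash.Hashing;

class ShardIdSketch {
  private static final int SEED = 1225801234; // seed value documented above

  // Hashes the already-encoded key bytes and reduces them to a shard id.
  static int shardIdFor(byte[] encodedKeyComponents, int numShards) {
    int hash = Hashing.murmur3_32(SEED).hashBytes(encodedKeyComponents).asInt();
    return Math.floorMod(hash, numShards); // keep the id non-negative
  }
}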
@@ -317,7 +317,7 @@ public class IsmFormat {
/**
* Computes the shard id for the given key component(s).
*
- * Mutates {@code keyBytes} such that when returned, contains the encoded
+ * <p>Mutates {@code keyBytes} such that when returned, contains the encoded
* version of the key components. Also, mutates {@code keyComponentByteOffsetsToMutate} to
* store the location where each key component's encoded byte representation ends within
* {@code keyBytes}.
@@ -619,7 +619,7 @@ public class IsmFormat {
/**
* A coder for {@link IsmShard}s.
*
- * The shard descriptor is encoded as:
+ * <p>The shard descriptor is encoded as:
* <ul>
* <li>id (variable length integer encoding)</li>
* <li>blockOffset (variable length long encoding)</li>
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
index dfe538d..8501f9f 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
@@ -186,8 +186,7 @@ public interface DataflowPipelineDebugOptions extends PipelineOptions {
* thrashing or out of memory. The location of the heap file will either be echoed back
* to the user, or the user will be given the opportunity to download the heap file.
*
- * <p>
- * CAUTION: Heap dumps can of comparable size to the default boot disk. Consider increasing
+ * <p>CAUTION: Heap dumps can be of comparable size to the default boot disk. Consider increasing
* the boot disk size before setting this flag to true.
*/
@Description("If {@literal true}, save a heap dump before killing a thread or process "
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
index 683e16b..84c53ea 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
@@ -44,8 +44,8 @@ import org.apache.beam.sdk.util.VarInt;
* also provides random access to bytes stored within. This wrapper allows users to finely
* control the number of byte copies that occur.
*
- * Anything stored within the in-memory buffer from offset {@link #size()} is considered temporary
- * unused storage.
+ * <p>Anything stored within the in-memory buffer from offset {@link #size()} is considered
+ * temporary unused storage.
*/
@NotThreadSafe
public class RandomAccessData {
@@ -54,7 +54,7 @@ public class RandomAccessData {
* This follows the same encoding scheme as {@link ByteArrayCoder}.
* This coder is deterministic and consistent with equals.
*
- * This coder does not support encoding positive infinity.
+ * <p>This coder does not support encoding positive infinity.
*/
public static class RandomAccessDataCoder extends AtomicCoder<RandomAccessData> {
private static final RandomAccessDataCoder INSTANCE = new RandomAccessDataCoder();
@@ -192,7 +192,7 @@ public class RandomAccessData {
* is strictly greater than this. Note that if this is empty or is all 0xFF then
* a token value of positive infinity is returned.
*
- * The {@link UnsignedLexicographicalComparator} supports comparing {@link RandomAccessData}
+ * <p>The {@link UnsignedLexicographicalComparator} supports comparing {@link RandomAccessData}
 * with support for positive infinity.
*/
public RandomAccessData increment() throws IOException {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
index 3888ec2..188479c 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
@@ -59,7 +59,7 @@ import org.slf4j.LoggerFactory;
* EvaluationResult result = SparkRunner.create().run(p);
* }
*
- * To create a pipeline runner to run against a different spark cluster, with a custom master url
+ * <p>To create a pipeline runner to run against a different Spark cluster with a custom master URL,
* we would do the following:
*
* {@code
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
index a1e5918..6ad6556 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
@@ -40,8 +40,8 @@ import org.apache.beam.sdk.values.POutput;
* EvaluationResult result = SparkRunner.create().run(p);
* }
*
- * To create a pipeline runner to run against a different spark cluster, with a custom master url we
- * would do the following:
+ * <p>To create a pipeline runner to run against a different Spark cluster with a custom master URL,
+ * we would do the following:
*
* {@code
* Pipeline p = [logic for pipeline creation]
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
index b40e022..e63c660 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.Writable;
/**
* A {@code WritableCoder} is a {@link Coder} for a Java class that implements {@link Writable}.
*
- * <p> To use, specify the coder type on a PCollection:
+ * <p>To use, specify the coder type on a PCollection:
* <pre>
* {@code
* PCollection<MyRecord> records =
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
index 0a30f9f..d78b437 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
@@ -24,7 +24,7 @@ package org.apache.beam.runners.spark.io.hadoop;
* that they produce shard names that adhere to the template in
* {@link HadoopIO.Write}.
*
- * Some common shard names are defined in
+ * <p>Some common shard names are defined in
* {@link org.apache.beam.sdk.io.ShardNameTemplate}.
*/
public interface ShardNameTemplateAware {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
index eb4002e..18926bc 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
@@ -52,7 +52,7 @@ import scala.Tuple2;
*/
public class GroupCombineFunctions {
- /***
+ /**
* Apply {@link GroupByKeyViaGroupByKeyOnly.GroupByKeyOnly} to a Spark RDD.
*/
public static <K, V> JavaRDD<WindowedValue<KV<K, Iterable<V>>>> groupByKeyOnly(
@@ -71,7 +71,7 @@ public class GroupCombineFunctions {
.map(WindowingHelpers.<KV<K, Iterable<V>>>windowFunction());
}
- /***
+ /**
* Apply {@link GroupByKeyViaGroupByKeyOnly.GroupAlsoByWindow} to a Spark RDD.
*/
public static <K, V, W extends BoundedWindow> JavaRDD<WindowedValue<KV<K, Iterable<V>>>>
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
index 9b156fe..9ad2a9e 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
@@ -165,7 +165,7 @@ public final class TranslationUtils {
}
}
- /***
+ /**
* Create SideInputs as Broadcast variables.
*
* @param views The {@link PCollectionView}s.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
----------------------------------------------------------------------
diff --git a/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java b/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
index 4a96690..05e9125 100644
--- a/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
+++ b/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
@@ -61,8 +61,8 @@ import org.junit.rules.TemporaryFolder;
/**
* Tests DStream recovery from checkpoint - recreate the job and continue (from checkpoint).
*
- * Tests Aggregators, which rely on Accumulators - Aggregators should be available, though state
- * is not preserved (Spark issue), so they start from initial value.
+ * <p>Tests Aggregators, which rely on Accumulators - Aggregators should be available, though
+ * state is not preserved (Spark issue), so they start from their initial value.
* //TODO: after the runner supports recovering the state of Aggregators, update this test's
* expected values for the recovered (second) run.
*/
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
----------------------------------------------------------------------
diff --git a/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml b/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
index ae4fcba..ca42652 100644
--- a/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
+++ b/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
@@ -153,6 +153,9 @@ page at http://checkstyle.sourceforge.net/config.html -->
<property name="allowUndeclaredRTE" value="true"/>
</module>
+ <!-- Check that paragraph tags are used correctly in Javadoc. -->
+ <module name="JavadocParagraph"/>
+
<module name="JavadocType">
<property name="scope" value="protected"/>
<property name="severity" value="error"/>
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
index 6ef02aa..f7ce3c2 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
@@ -115,6 +115,7 @@ import org.apache.commons.compress.utils.CountingInputStream;
* }</pre>
*
* <h3>Permissions</h3>
+ *
* <p>Permission requirements depend on the {@link PipelineRunner} that is used to execute the
* Dataflow job. Please refer to the documentation of corresponding {@link PipelineRunner}s for
* more details.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
index 5fd7b8a..cd4d7db 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
@@ -85,11 +85,13 @@ public abstract class BoundedSource<T> extends Source<T> {
* operations, such as progress estimation and dynamic work rebalancing.
*
* <h3>Boundedness</h3>
- * <p>Once {@link #start} or {@link #advance} has returned false, neither will be called
+ *
+ * <p>Once {@link #start} or {@link #advance} has returned false, neither will be called
* again on this object.
*
* <h3>Thread safety</h3>
- * All methods will be run from the same thread except {@link #splitAtFraction},
+ *
+ * <p>All methods will be run from the same thread except {@link #splitAtFraction},
* {@link #getFractionConsumed}, {@link #getCurrentSource}, {@link #getSplitPointsConsumed()},
* and {@link #getSplitPointsRemaining()}, all of which can be called concurrently
* from a different thread. There will not be multiple concurrent calls to
@@ -106,7 +108,8 @@ public abstract class BoundedSource<T> extends Source<T> {
* {@link #getCurrentSource} which do not change between {@link #splitAtFraction} calls.
*
* <h3>Implementing {@link #splitAtFraction}</h3>
- * In the course of dynamic work rebalancing, the method {@link #splitAtFraction}
+ *
+ * <p>In the course of dynamic work rebalancing, the method {@link #splitAtFraction}
* may be called concurrently with {@link #advance} or {@link #start}. It is critical that
* their interaction is implemented in a thread-safe way, otherwise data loss is possible.
*
@@ -261,14 +264,17 @@ public abstract class BoundedSource<T> extends Source<T> {
* (including items already read).
*
* <h3>Usage</h3>
+ *
* <p>Reader subclasses can use this method for convenience to access unchanging properties of
* the source being read. Alternatively, they can cache these properties in the constructor.
+ *
* <p>The framework will call this method in the course of dynamic work rebalancing, e.g. after
* a successful {@link BoundedSource.BoundedReader#splitAtFraction} call.
*
* <h3>Mutability and thread safety</h3>
- * Remember that {@link Source} objects must always be immutable. However, the return value of
- * this function may be affected by dynamic work rebalancing, happening asynchronously via
+ *
+ * <p>Remember that {@link Source} objects must always be immutable. However, the return value
+ * of this function may be affected by dynamic work rebalancing, happening asynchronously via
* {@link BoundedSource.BoundedReader#splitAtFraction}, meaning it can return a different
 * {@link Source} object. However, the returned object itself will still be immutable.
* Callers must take care not to rely on properties of the returned source that may be
@@ -276,7 +282,8 @@ public abstract class BoundedSource<T> extends Source<T> {
* reading a file).
*
* <h3>Implementation</h3>
- * For convenience, subclasses should usually return the most concrete subclass of
+ *
+ * <p>For convenience, subclasses should usually return the most concrete subclass of
* {@link Source} possible.
* In practice, the implementation of this method should nearly always be one of the following:
* <ul>
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
index ea95f2f..f571d50 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
@@ -185,6 +185,7 @@ public abstract class FileBasedSink<T> extends Sink<T> {
* {@link FileBasedSink#fileNamingTemplate}.
*
* <h2>Temporary Bundle File Handling:</h2>
+ *
* <p>{@link FileBasedSink.FileBasedWriteOperation#temporaryFileRetention} controls the behavior
* for managing temporary files. By default, temporary files will be removed. Subclasses can
* provide a different value to the constructor.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
index 6c685ff..6e49cc3 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
@@ -248,7 +248,9 @@ public abstract class OffsetBasedSource<T> extends BoundedSource<T> {
* Returns the <i>starting</i> offset of the {@link Source.Reader#getCurrent current record},
* which has been read by the last successful {@link Source.Reader#start} or
* {@link Source.Reader#advance} call.
+ *
* <p>If no such call has been made yet, the return value is unspecified.
+ *
* <p>See {@link RangeTracker} for description of offset semantics.
*/
protected abstract long getCurrentOffset() throws NoSuchElementException;
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
index d113457..2f9054f 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
@@ -59,6 +59,7 @@ import org.slf4j.LoggerFactory;
* and consume unbounded {@link PCollection PCollections}.
*
* <h3>Permissions</h3>
+ *
* <p>Permission requirements depend on the {@link PipelineRunner} that is used to execute the
* Dataflow job. Please refer to the documentation of corresponding
* {@link PipelineRunner PipelineRunners} for more details.
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
index 1abcc3d..3f49eac 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
@@ -71,6 +71,7 @@ import org.apache.beam.sdk.values.PCollection;
* </ul>
*
* <h2>WriteOperation</h2>
+ *
* <p>{@link WriteOperation#initialize} and {@link WriteOperation#finalize} are conceptually called
* once: at the beginning and end of a Write transform. However, implementors must ensure that these
* methods are idempotent, as they may be called multiple times on different machines in the case of
@@ -91,6 +92,7 @@ import org.apache.beam.sdk.values.PCollection;
* these mutations will not be visible in finalize).
*
* <h2>Bundle Ids:</h2>
+ *
* <p>In order to ensure fault-tolerance, a bundle may be executed multiple times (e.g., in the
* event of failure/retry or for redundancy). However, exactly one of these executions will have its
* result passed to the WriteOperation's finalize method. Each call to {@link Writer#open} is passed
@@ -110,6 +112,7 @@ import org.apache.beam.sdk.values.PCollection;
* of output file names that it can then merge or rename using some bundle naming scheme.
*
* <h2>Writer Results:</h2>
+ *
* <p>{@link WriteOperation}s and {@link Writer}s must agree on a writer result type that will be
* returned by a Writer after it writes a bundle. This type can be a client-defined object or an
* existing type; {@link WriteOperation#getWriterResultCoder} should return a {@link Coder} for the
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
index 62d3ae8..6ec4533 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
@@ -107,6 +107,7 @@ import org.apache.beam.sdk.values.PDone;
* }</pre>
*
* <h3>Permissions</h3>
+ *
* <p>When run using the {@code DirectRunner}, your pipeline can read and write text files
* on your local drive and remote text files on Google Cloud Storage that you have access to using
* your {@code gcloud} credentials. When running in the Dataflow service, the pipeline can only
@@ -230,7 +231,7 @@ public class TextIO {
/**
* Returns a new transform for reading from text files that's like this one but
- * that uses the given {@link Coder Coder<X>} to decode each of the
+ * that uses the given {@link Coder Coder<X>} to decode each of the
* lines of the file into a value of type {@code X}.
*
* <p>Does not modify this object.
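A hedged usage sketch of withCoder as documented here, assuming this commit's TextIO.Read API and the SDK's TextualIntegerCoder:

// Reads one integer per line, decoded by the supplied coder.
PCollection<Integer> numbers = p.apply(
    TextIO.Read.from("gs://some_bucket/numbers.txt")
        .withCoder(TextualIntegerCoder.of()));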
@@ -602,8 +603,8 @@ public class TextIO {
/**
* Returns a transform for writing to text files that's like this one
- * but that uses the given {@link Coder Coder<X>} to encode each of
- * the elements of the input {@link PCollection PCollection<X>} into an
+ * but that uses the given {@link Coder Coder<X>} to encode each of
+ * the elements of the input {@link PCollection PCollection<X>} into an
* output text line. Does not modify this object.
*
* @param <X> the type of the elements of the input {@link PCollection}
@@ -853,7 +854,7 @@ public class TextIO {
* A {@link org.apache.beam.sdk.io.FileBasedSource.FileBasedReader FileBasedReader}
* which can decode records delimited by newline characters.
*
- * See {@link TextSource} for further details.
+ * <p>See {@link TextSource} for further details.
*/
@VisibleForTesting
static class TextBasedReader<T> extends FileBasedReader<T> {
@@ -985,7 +986,7 @@ public class TextIO {
/**
* Decodes the current element updating the buffer to only contain the unconsumed bytes.
*
- * This invalidates the currently stored {@code startOfSeparatorInBuffer} and
+ * <p>This invalidates the currently stored {@code startOfSeparatorInBuffer} and
* {@code endOfSeparatorInBuffer}.
*/
private void decodeCurrentElement() throws IOException {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
index 9d0beb7..e8b19d9 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
@@ -69,7 +69,7 @@ import org.slf4j.LoggerFactory;
* <p>Example usage with runner-controlled sharding:
*
* <pre>{@code p.apply(Write.to(new MySink(...)));}</pre>
-
+ *
* <p>Example usage with a fixed number of shards:
*
* <pre>{@code p.apply(Write.to(new MySink(...)).withNumShards(3));}</pre>
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
index ad2f119..f352f01 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
@@ -192,6 +192,7 @@ public interface RangeTracker<PositionT> {
* <li>Otherwise, updates the last-consumed position to {@code recordStart} and returns
* {@code true}.
* </ul>
+ *
* <p>This method MUST be called on all split point records. It may be called on every record.
*/
boolean tryReturnRecordAt(boolean isAtSplitPoint, PositionT recordStart);
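A minimal sketch of that calling protocol, with readNextRecord, isAtSplitPoint, and getRecordStart as invented stand-ins for a concrete reader's internals:

// Inside a reader's advance():
boolean advance(RangeTracker<Long> tracker) {
  if (!readNextRecord()) {
    return false; // end of input
  }
  // Every split point record MUST be vetted through the tracker; a false
  // return means the range was split away and reading must stop.
  return tracker.tryReturnRecordAt(isAtSplitPoint(), getRecordStart());
}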
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
index 3e810e9..deb1cf4 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
@@ -93,7 +93,7 @@ import org.joda.time.format.DateTimeFormatter;
*
* <h2>Defining Your Own PipelineOptions</h2>
*
- * Defining your own {@link PipelineOptions} is the way for you to make configuration
+ * <p>Defining your own {@link PipelineOptions} is the way for you to make configuration
* options available for both local execution and execution via a {@link PipelineRunner}.
* By having PipelineOptionsFactory as your command-line interpreter, you will provide
* a standardized way for users to interact with your application via the command-line.
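A hedged sketch of such an interface and its parsing; MyOptions and its inputFile property are illustrative names, while @Description, @Default.String, and PipelineOptionsFactory.fromArgs are the SDK's own entry points:

public interface MyOptions extends PipelineOptions {
  @Description("Path of the input file")
  @Default.String("input.txt")
  String getInputFile();
  void setInputFile(String value);
}

// In main(String[] args):
MyOptions options =
    PipelineOptionsFactory.fromArgs(args).withValidation().as(MyOptions.class);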
@@ -117,7 +117,7 @@ import org.joda.time.format.DateTimeFormatter;
*
* <h3>Restrictions</h3>
*
- * Since PipelineOptions can be "cast" to multiple types dynamically using
+ * <p>Since PipelineOptions can be "cast" to multiple types dynamically using
* {@link PipelineOptions#as(Class)}, a property must conform to the following set of restrictions:
* <ul>
* <li>Any property with the same name must have the same return type for all derived
@@ -134,7 +134,7 @@ import org.joda.time.format.DateTimeFormatter;
*
* <h3>Annotations For PipelineOptions</h3>
*
- * {@link Description @Description} can be used to annotate an interface or a getter
+ * <p>{@link Description @Description} can be used to annotate an interface or a getter
* with useful information which is output when {@code --help}
* is invoked via {@link PipelineOptionsFactory#fromArgs(String[])}.
*
@@ -158,7 +158,7 @@ import org.joda.time.format.DateTimeFormatter;
*
* <h2>Registration Of PipelineOptions</h2>
*
- * Registration of {@link PipelineOptions} by an application guarantees that the
+ * <p>Registration of {@link PipelineOptions} by an application guarantees that the
* {@link PipelineOptions} is composable during execution of their {@link Pipeline} and
* meets the restrictions listed above or will fail during registration. Registration
* also lists the registered {@link PipelineOptions} when {@code --help}
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
index aa6f500..c438a43 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
@@ -198,7 +198,7 @@ class ProxyInvocationHandler implements InvocationHandler, HasDisplayData {
* Backing implementation for {@link PipelineOptions#as(Class)}.
*
* @param iface The interface that the returned object needs to implement.
- * @return An object that implements the interface <T>.
+ * @return An object that implements the interface {@code <T>}.
*/
synchronized <T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
index bd44c48..1021b2f 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
@@ -1047,14 +1047,14 @@ class SerializableMatchers implements Serializable {
* the {@link Matcher} returned by {@link SerializableSupplier#get() get()} when it is invoked
* during matching (which may occur on another machine, such as a Dataflow worker).
*
- * <code>
+ * <pre>{@code
* return fromSupplier(new SerializableSupplier<Matcher<T>>() {
 * @Override
* public Matcher<T> get() {
* return new MyMatcherForT();
* }
* });
- * </code>
+ * }</pre>
*/
public static <T> SerializableMatcher<T> fromSupplier(
SerializableSupplier<Matcher<T>> supplier) {
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
index e38e1af..dd62aeb 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
@@ -285,7 +285,8 @@ public class SourceTestUtils {
* Asserts that the {@code source}'s reader either fails to {@code splitAtFraction(fraction)}
* after reading {@code numItemsToReadBeforeSplit} items, or succeeds in a way that is
* consistent according to {@link #assertSplitAtFractionSucceedsAndConsistent}.
- * <p> Returns SplitAtFractionResult.
+ *
+ * <p>Returns SplitAtFractionResult.
*/
public static <T> SplitAtFractionResult assertSplitAtFractionBehavior(
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
index 656bd7b..ed3a253 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
@@ -207,8 +207,9 @@ public class ApproximateQuantiles {
* </pre>
*
* <p>The default error bound is {@code 1 / N}, though in practice
- * the accuracy tends to be much better. <p>See
- * {@link #create(int, Comparator, long, double)} for
+ * the accuracy tends to be much better.
+ *
+ * <p>See {@link #create(int, Comparator, long, double)} for
* more information about the meaning of {@code epsilon}, and
* {@link #withEpsilon} for a convenient way to adjust it.
*
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
index a00dcba..e9216e1 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
@@ -672,7 +672,7 @@ public class Combine {
* An abstract subclass of {@link CombineFn} for implementing combiners that are more
 * easily and efficiently expressed as binary operations on <code>int</code>s.
*
- * <p> It uses {@code int[0]} as the mutable accumulator.
+ * <p>It uses {@code int[0]} as the mutable accumulator.
*/
public abstract static class BinaryCombineIntegerFn extends CombineFn<Integer, int[], Integer> {
@@ -774,7 +774,7 @@ public class Combine {
* An abstract subclass of {@link CombineFn} for implementing combiners that are more
* easily and efficiently expressed as binary operations on <code>long</code>s.
*
- * <p> It uses {@code long[0]} as the mutable accumulator.
+ * <p>It uses {@code long[0]} as the mutable accumulator.
*/
public abstract static class BinaryCombineLongFn extends CombineFn<Long, long[], Long> {
/**
@@ -873,7 +873,7 @@ public class Combine {
* An abstract subclass of {@link CombineFn} for implementing combiners that are more
* easily and efficiently expressed as binary operations on <code>double</code>s.
*
- * <p> It uses {@code double[0]} as the mutable accumulator.
+ * <p>It uses {@code double[0]} as the mutable accumulator.
*/
public abstract static class BinaryCombineDoubleFn extends CombineFn<Double, double[], Double> {
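
For context, these three abstract classes follow the same shape; a minimal
sketch of subclassing the integer variant (MaxIntFn is an illustrative name,
not SDK code). The subclass only supplies the binary operation and its
identity; the SDK provides the int[0] accumulator plumbing described above.

import org.apache.beam.sdk.transforms.Combine;

// A max combiner expressed as a binary operation on ints.
public class MaxIntFn extends Combine.BinaryCombineIntegerFn {
  @Override
  public int apply(int left, int right) {
    return Math.max(left, right);
  }

  @Override
  public int identity() {
    // Combining the identity with any value must yield that value.
    return Integer.MIN_VALUE;
  }
}

Such a fn can then be applied over a PCollection<Integer> with, e.g.,
Combine.globally(new MaxIntFn()).
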
[3/3] incubator-beam git commit: Closes #1060
Posted by dh...@apache.org.
Closes #1060
Project: http://git-wip-us.apache.org/repos/asf/incubator-beam/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-beam/commit/135790bc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-beam/tree/135790bc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-beam/diff/135790bc
Branch: refs/heads/master
Commit: 135790bc97e84b79d4b6d3728b86961fa738a212
Parents: 7c2124b 9e30a98
Author: Dan Halperin <dh...@google.com>
Authored: Tue Oct 11 16:39:15 2016 -0700
Committer: Dan Halperin <dh...@google.com>
Committed: Tue Oct 11 16:39:15 2016 -0700
----------------------------------------------------------------------
.../beam/examples/cookbook/TriggerExample.java | 28 ++++++++++----------
.../beam/examples/complete/game/GameStats.java | 8 +++---
.../examples/complete/game/HourlyTeamScore.java | 6 ++---
.../examples/complete/game/LeaderBoard.java | 10 +++----
.../beam/examples/complete/game/UserScore.java | 8 +++---
.../complete/game/injector/Injector.java | 10 +++----
.../apache/beam/runners/core/DoFnRunner.java | 2 +-
.../runners/direct/DirectExecutionContext.java | 2 +-
.../beam/runners/direct/DirectRunner.java | 13 +++++----
.../direct/ExecutorServiceParallelExecutor.java | 2 +-
.../beam/runners/direct/TransformEvaluator.java | 2 +-
.../beam/runners/direct/TransformResult.java | 4 +--
.../runners/direct/ViewEvaluatorFactory.java | 2 +-
.../beam/runners/direct/WatermarkManager.java | 4 +--
.../beam/runners/flink/examples/TFIDF.java | 12 ++++-----
.../beam/runners/flink/examples/WordCount.java | 4 +--
.../flink/examples/streaming/AutoComplete.java | 4 +--
.../runners/flink/FlinkPipelineOptions.java | 8 +++---
.../apache/beam/runners/flink/FlinkRunner.java | 1 -
.../runners/flink/FlinkRunnerRegistrar.java | 4 +--
.../functions/FlinkMultiOutputDoFnFunction.java | 2 +-
.../beam/runners/flink/FlinkTestPipeline.java | 6 ++---
.../beam/runners/dataflow/DataflowRunner.java | 9 ++++---
.../dataflow/internal/AssignWindows.java | 6 ++---
.../runners/dataflow/internal/IsmFormat.java | 8 +++---
.../options/DataflowPipelineDebugOptions.java | 3 +--
.../runners/dataflow/util/RandomAccessData.java | 8 +++---
.../apache/beam/runners/spark/SparkRunner.java | 2 +-
.../beam/runners/spark/TestSparkRunner.java | 4 +--
.../runners/spark/coders/WritableCoder.java | 2 +-
.../spark/io/hadoop/ShardNameTemplateAware.java | 2 +-
.../translation/GroupCombineFunctions.java | 4 +--
.../spark/translation/TranslationUtils.java | 2 +-
.../RecoverFromCheckpointStreamingTest.java | 4 +--
.../src/main/resources/beam/checkstyle.xml | 3 +++
.../java/org/apache/beam/sdk/io/AvroSource.java | 1 +
.../org/apache/beam/sdk/io/BoundedSource.java | 19 ++++++++-----
.../org/apache/beam/sdk/io/FileBasedSink.java | 1 +
.../apache/beam/sdk/io/OffsetBasedSource.java | 2 ++
.../java/org/apache/beam/sdk/io/PubsubIO.java | 1 +
.../main/java/org/apache/beam/sdk/io/Sink.java | 3 +++
.../java/org/apache/beam/sdk/io/TextIO.java | 11 ++++----
.../main/java/org/apache/beam/sdk/io/Write.java | 2 +-
.../apache/beam/sdk/io/range/RangeTracker.java | 1 +
.../beam/sdk/options/PipelineOptions.java | 8 +++---
.../sdk/options/ProxyInvocationHandler.java | 2 +-
.../beam/sdk/testing/SerializableMatchers.java | 4 +--
.../beam/sdk/testing/SourceTestUtils.java | 3 ++-
.../sdk/transforms/ApproximateQuantiles.java | 5 ++--
.../org/apache/beam/sdk/transforms/Combine.java | 6 ++---
.../apache/beam/sdk/transforms/CombineFns.java | 8 +++---
.../beam/sdk/transforms/CombineWithContext.java | 4 +--
.../apache/beam/sdk/transforms/GroupByKey.java | 4 +--
.../org/apache/beam/sdk/transforms/Latest.java | 12 ++++-----
.../org/apache/beam/sdk/transforms/OldDoFn.java | 2 +-
.../apache/beam/sdk/transforms/PTransform.java | 8 +++---
.../org/apache/beam/sdk/transforms/ParDo.java | 14 +++++-----
.../beam/sdk/transforms/RemoveDuplicates.java | 5 ++--
.../org/apache/beam/sdk/transforms/ViewFn.java | 2 +-
.../apache/beam/sdk/transforms/WithKeys.java | 2 +-
.../sdk/transforms/display/DisplayData.java | 8 +++---
.../sdk/transforms/reflect/DoFnInvoker.java | 2 +-
.../transforms/windowing/AfterWatermark.java | 4 +--
.../beam/sdk/transforms/windowing/Never.java | 3 +--
.../beam/sdk/transforms/windowing/PaneInfo.java | 4 +--
.../transforms/windowing/SlidingWindows.java | 3 +--
.../beam/sdk/transforms/windowing/Window.java | 18 ++++++-------
.../beam/sdk/util/BaseExecutionContext.java | 4 +--
.../sdk/util/ExposedByteArrayOutputStream.java | 1 +
.../apache/beam/sdk/util/GatherAllPanes.java | 10 +++----
.../beam/sdk/util/PerKeyCombineFnRunners.java | 4 +--
.../org/apache/beam/sdk/util/PubsubClient.java | 3 +++
.../apache/beam/sdk/util/PubsubTestClient.java | 2 +-
.../apache/beam/sdk/util/TimerInternals.java | 2 +-
.../java/org/apache/beam/sdk/values/PInput.java | 2 +-
.../apache/beam/sdk/values/TypeDescriptors.java | 16 +++++------
.../beam/sdk/testing/SystemNanoTimeSleeper.java | 2 +-
.../beam/sdk/transforms/DoFnTesterTest.java | 2 +-
.../sdk/io/gcp/bigquery/BigQueryAvroUtils.java | 2 +-
.../beam/sdk/io/gcp/bigquery/BigQueryIO.java | 5 ++++
.../sdk/io/gcp/bigquery/BigQueryServices.java | 2 +-
.../beam/sdk/io/gcp/datastore/DatastoreIO.java | 2 +-
.../beam/sdk/io/gcp/datastore/DatastoreV1.java | 2 +-
.../sdk/io/gcp/datastore/SplitQueryFnIT.java | 4 +--
.../beam/sdk/io/gcp/datastore/V1ReadIT.java | 2 +-
.../beam/sdk/io/gcp/datastore/V1WriteIT.java | 2 +-
.../apache/beam/sdk/io/hdfs/HDFSFileSource.java | 3 ++-
.../apache/beam/sdk/io/hdfs/WritableCoder.java | 2 +-
.../SimpleAuthAvroHDFSFileSource.java | 2 +-
.../hdfs/simpleauth/SimpleAuthHDFSFileSink.java | 2 +-
.../simpleauth/SimpleAuthHDFSFileSource.java | 2 +-
.../java/org/apache/beam/sdk/io/jms/JmsIO.java | 8 +++---
.../org/apache/beam/sdk/io/kafka/KafkaIO.java | 16 +++++------
.../beam/sdk/io/kinesis/CustomOptional.java | 2 +-
.../sdk/io/kinesis/GetKinesisRecordsResult.java | 2 +-
.../sdk/io/kinesis/KinesisClientProvider.java | 2 +-
.../apache/beam/sdk/io/kinesis/KinesisIO.java | 14 +++++-----
.../beam/sdk/io/kinesis/KinesisReader.java | 10 +++----
.../sdk/io/kinesis/KinesisReaderCheckpoint.java | 4 +--
.../beam/sdk/io/kinesis/KinesisRecordCoder.java | 2 +-
.../beam/sdk/io/kinesis/KinesisSource.java | 6 ++---
.../beam/sdk/io/kinesis/RecordFilter.java | 6 ++---
.../apache/beam/sdk/io/kinesis/RoundRobin.java | 2 +-
.../beam/sdk/io/kinesis/ShardCheckpoint.java | 6 ++---
.../sdk/io/kinesis/ShardRecordsIterator.java | 4 +--
.../sdk/io/kinesis/SimplifiedKinesisClient.java | 8 +++---
.../beam/sdk/io/kinesis/StartingPoint.java | 2 +-
.../beam/sdk/io/kinesis/KinesisTestOptions.java | 2 +-
.../beam/sdk/io/kinesis/KinesisUploader.java | 2 +-
.../beam/sdk/io/mongodb/MongoDbGridFSIO.java | 6 ++---
.../apache/beam/sdk/io/mongodb/MongoDbIO.java | 7 +++--
111 files changed, 293 insertions(+), 269 deletions(-)
----------------------------------------------------------------------