checkstyle: improve Javadoc checking

* Enable JavadocParagraph, which enforces a consistent style for Javadoc
  paragraphs. This caught a lot of missing <p> tags and other issues.

* This also caught a lot of HTML issues that caused Javadoc to render incorrectly.
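
For reference: the JavadocParagraph check requires a blank comment line before
each paragraph and requires every paragraph after the first to start with a
<p> tag immediately followed by text, which is why this diff rewrites "<p> "
as "<p>" throughout. The check is presumably enabled by adding
<module name="JavadocParagraph"/> under TreeWalker in beam/checkstyle.xml
(the three-line change in the diffstat below). A minimal sketch of a
conforming comment, on a hypothetical class:

    /**
     * Summary fragment; the first paragraph takes no tag.
     *
     * <p>Later paragraphs start with a tag followed directly by text;
     * a trailing space after the tag fails the check.
     */
    public class ConformingExample {} // hypothetical; not part of this commit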


Project: http://git-wip-us.apache.org/repos/asf/incubator-beam/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-beam/commit/9e30a989
Tree: http://git-wip-us.apache.org/repos/asf/incubator-beam/tree/9e30a989
Diff: http://git-wip-us.apache.org/repos/asf/incubator-beam/diff/9e30a989

Branch: refs/heads/master
Commit: 9e30a989dddb95854e0c403d58c1b5fb137ee720
Parents: 7c2124b
Author: Dan Halperin <dhalp...@google.com>
Authored: Wed Oct 5 20:24:00 2016 -0700
Committer: Dan Halperin <dhalp...@google.com>
Committed: Tue Oct 11 16:08:15 2016 -0700

----------------------------------------------------------------------
 .../beam/examples/cookbook/TriggerExample.java  | 28 ++++++++++----------
 .../beam/examples/complete/game/GameStats.java  |  8 +++---
 .../examples/complete/game/HourlyTeamScore.java |  6 ++---
 .../examples/complete/game/LeaderBoard.java     | 10 +++----
 .../beam/examples/complete/game/UserScore.java  |  8 +++---
 .../complete/game/injector/Injector.java        | 10 +++----
 .../apache/beam/runners/core/DoFnRunner.java    |  2 +-
 .../runners/direct/DirectExecutionContext.java  |  2 +-
 .../beam/runners/direct/DirectRunner.java       | 13 +++++----
 .../direct/ExecutorServiceParallelExecutor.java |  2 +-
 .../beam/runners/direct/TransformEvaluator.java |  2 +-
 .../beam/runners/direct/TransformResult.java    |  4 +--
 .../runners/direct/ViewEvaluatorFactory.java    |  2 +-
 .../beam/runners/direct/WatermarkManager.java   |  4 +--
 .../beam/runners/flink/examples/TFIDF.java      | 12 ++++-----
 .../beam/runners/flink/examples/WordCount.java  |  4 +--
 .../flink/examples/streaming/AutoComplete.java  |  4 +--
 .../runners/flink/FlinkPipelineOptions.java     |  8 +++---
 .../apache/beam/runners/flink/FlinkRunner.java  |  1 -
 .../runners/flink/FlinkRunnerRegistrar.java     |  4 +--
 .../functions/FlinkMultiOutputDoFnFunction.java |  2 +-
 .../beam/runners/flink/FlinkTestPipeline.java   |  6 ++---
 .../beam/runners/dataflow/DataflowRunner.java   |  9 ++++---
 .../dataflow/internal/AssignWindows.java        |  6 ++---
 .../runners/dataflow/internal/IsmFormat.java    |  8 +++---
 .../options/DataflowPipelineDebugOptions.java   |  3 +--
 .../runners/dataflow/util/RandomAccessData.java |  8 +++---
 .../apache/beam/runners/spark/SparkRunner.java  |  2 +-
 .../beam/runners/spark/TestSparkRunner.java     |  4 +--
 .../runners/spark/coders/WritableCoder.java     |  2 +-
 .../spark/io/hadoop/ShardNameTemplateAware.java |  2 +-
 .../translation/GroupCombineFunctions.java      |  4 +--
 .../spark/translation/TranslationUtils.java     |  2 +-
 .../RecoverFromCheckpointStreamingTest.java     |  4 +--
 .../src/main/resources/beam/checkstyle.xml      |  3 +++
 .../java/org/apache/beam/sdk/io/AvroSource.java |  1 +
 .../org/apache/beam/sdk/io/BoundedSource.java   | 19 ++++++++-----
 .../org/apache/beam/sdk/io/FileBasedSink.java   |  1 +
 .../apache/beam/sdk/io/OffsetBasedSource.java   |  2 ++
 .../java/org/apache/beam/sdk/io/PubsubIO.java   |  1 +
 .../main/java/org/apache/beam/sdk/io/Sink.java  |  3 +++
 .../java/org/apache/beam/sdk/io/TextIO.java     | 11 ++++----
 .../main/java/org/apache/beam/sdk/io/Write.java |  2 +-
 .../apache/beam/sdk/io/range/RangeTracker.java  |  1 +
 .../beam/sdk/options/PipelineOptions.java       |  8 +++---
 .../sdk/options/ProxyInvocationHandler.java     |  2 +-
 .../beam/sdk/testing/SerializableMatchers.java  |  4 +--
 .../beam/sdk/testing/SourceTestUtils.java       |  3 ++-
 .../sdk/transforms/ApproximateQuantiles.java    |  5 ++--
 .../org/apache/beam/sdk/transforms/Combine.java |  6 ++---
 .../apache/beam/sdk/transforms/CombineFns.java  |  8 +++---
 .../beam/sdk/transforms/CombineWithContext.java |  4 +--
 .../apache/beam/sdk/transforms/GroupByKey.java  |  4 +--
 .../org/apache/beam/sdk/transforms/Latest.java  | 12 ++++-----
 .../org/apache/beam/sdk/transforms/OldDoFn.java |  2 +-
 .../apache/beam/sdk/transforms/PTransform.java  |  8 +++---
 .../org/apache/beam/sdk/transforms/ParDo.java   | 14 +++++-----
 .../beam/sdk/transforms/RemoveDuplicates.java   |  5 ++--
 .../org/apache/beam/sdk/transforms/ViewFn.java  |  2 +-
 .../apache/beam/sdk/transforms/WithKeys.java    |  2 +-
 .../sdk/transforms/display/DisplayData.java     |  8 +++---
 .../sdk/transforms/reflect/DoFnInvoker.java     |  2 +-
 .../transforms/windowing/AfterWatermark.java    |  4 +--
 .../beam/sdk/transforms/windowing/Never.java    |  3 +--
 .../beam/sdk/transforms/windowing/PaneInfo.java |  4 +--
 .../transforms/windowing/SlidingWindows.java    |  3 +--
 .../beam/sdk/transforms/windowing/Window.java   | 18 ++++++-------
 .../beam/sdk/util/BaseExecutionContext.java     |  4 +--
 .../sdk/util/ExposedByteArrayOutputStream.java  |  1 +
 .../apache/beam/sdk/util/GatherAllPanes.java    | 10 +++----
 .../beam/sdk/util/PerKeyCombineFnRunners.java   |  4 +--
 .../org/apache/beam/sdk/util/PubsubClient.java  |  3 +++
 .../apache/beam/sdk/util/PubsubTestClient.java  |  2 +-
 .../apache/beam/sdk/util/TimerInternals.java    |  2 +-
 .../java/org/apache/beam/sdk/values/PInput.java |  2 +-
 .../apache/beam/sdk/values/TypeDescriptors.java | 16 +++++------
 .../beam/sdk/testing/SystemNanoTimeSleeper.java |  2 +-
 .../beam/sdk/transforms/DoFnTesterTest.java     |  2 +-
 .../sdk/io/gcp/bigquery/BigQueryAvroUtils.java  |  2 +-
 .../beam/sdk/io/gcp/bigquery/BigQueryIO.java    |  5 ++++
 .../sdk/io/gcp/bigquery/BigQueryServices.java   |  2 +-
 .../beam/sdk/io/gcp/datastore/DatastoreIO.java  |  2 +-
 .../beam/sdk/io/gcp/datastore/DatastoreV1.java  |  2 +-
 .../sdk/io/gcp/datastore/SplitQueryFnIT.java    |  4 +--
 .../beam/sdk/io/gcp/datastore/V1ReadIT.java     |  2 +-
 .../beam/sdk/io/gcp/datastore/V1WriteIT.java    |  2 +-
 .../apache/beam/sdk/io/hdfs/HDFSFileSource.java |  3 ++-
 .../apache/beam/sdk/io/hdfs/WritableCoder.java  |  2 +-
 .../SimpleAuthAvroHDFSFileSource.java           |  2 +-
 .../hdfs/simpleauth/SimpleAuthHDFSFileSink.java |  2 +-
 .../simpleauth/SimpleAuthHDFSFileSource.java    |  2 +-
 .../java/org/apache/beam/sdk/io/jms/JmsIO.java  |  8 +++---
 .../org/apache/beam/sdk/io/kafka/KafkaIO.java   | 16 +++++------
 .../beam/sdk/io/kinesis/CustomOptional.java     |  2 +-
 .../sdk/io/kinesis/GetKinesisRecordsResult.java |  2 +-
 .../sdk/io/kinesis/KinesisClientProvider.java   |  2 +-
 .../apache/beam/sdk/io/kinesis/KinesisIO.java   | 14 +++++-----
 .../beam/sdk/io/kinesis/KinesisReader.java      | 10 +++----
 .../sdk/io/kinesis/KinesisReaderCheckpoint.java |  4 +--
 .../beam/sdk/io/kinesis/KinesisRecordCoder.java |  2 +-
 .../beam/sdk/io/kinesis/KinesisSource.java      |  6 ++---
 .../beam/sdk/io/kinesis/RecordFilter.java       |  6 ++---
 .../apache/beam/sdk/io/kinesis/RoundRobin.java  |  2 +-
 .../beam/sdk/io/kinesis/ShardCheckpoint.java    |  6 ++---
 .../sdk/io/kinesis/ShardRecordsIterator.java    |  4 +--
 .../sdk/io/kinesis/SimplifiedKinesisClient.java |  8 +++---
 .../beam/sdk/io/kinesis/StartingPoint.java      |  2 +-
 .../beam/sdk/io/kinesis/KinesisTestOptions.java |  2 +-
 .../beam/sdk/io/kinesis/KinesisUploader.java    |  2 +-
 .../beam/sdk/io/mongodb/MongoDbGridFSIO.java    |  6 ++---
 .../apache/beam/sdk/io/mongodb/MongoDbIO.java   |  7 +++--
 111 files changed, 293 insertions(+), 269 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
----------------------------------------------------------------------
diff --git a/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java b/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
index 68d4d32..d965d4a 100644
--- a/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
+++ b/examples/java/src/main/java/org/apache/beam/examples/cookbook/TriggerExample.java
@@ -61,11 +61,11 @@ import org.joda.time.Instant;
 * {@link org.apache.beam.sdk.transforms.windowing.Trigger triggers} to control when the results for
  * each window are emitted.
  *
- * <p> This example uses a portion of real traffic data from San Diego freeways. It contains
+ * <p>This example uses a portion of real traffic data from San Diego freeways. It contains
 * readings from sensor stations set up along each freeway. Each sensor reading includes a
  * calculation of the 'total flow' across all lanes in that freeway direction.
  *
- * <p> Concepts:
+ * <p>Concepts:
  * <pre>
  *   1. The default triggering behavior
  *   2. Late data with the default trigger
@@ -73,28 +73,28 @@ import org.joda.time.Instant;
  *   4. Combining late data and speculative estimates
  * </pre>
  *
- * <p> Before running this example, it will be useful to familiarize yourself with Beam triggers
+ * <p>Before running this example, it will be useful to familiarize yourself with Beam triggers
  * and understand the concept of 'late data',
 * See: <a href="http://beam.incubator.apache.org/use/walkthroughs/">
  * http://beam.incubator.apache.org/use/walkthroughs/</a>
  *
- * <p> The example is configured to use the default BigQuery table from the example common package
+ * <p>The example is configured to use the default BigQuery table from the example common package
  * (there are no defaults for a general Beam pipeline).
 * You can override them by using the {@code --bigQueryDataset}, and {@code --bigQueryTable}
 * options. If the BigQuery table do not exist, the example will try to create them.
  *
- * <p> The pipeline outputs its results to a BigQuery table.
+ * <p>The pipeline outputs its results to a BigQuery table.
  * Here are some queries you can use to see interesting results:
 * Replace {@code <enter_table_name>} in the query below with the name of the BigQuery table.
 * Replace {@code <enter_window_interval>} in the query below with the window interval.
  *
- * <p> To see the results of the default trigger,
+ * <p>To see the results of the default trigger,
 * Note: When you start up your pipeline, you'll initially see results from 'late' data. Wait after
 * the window duration, until the first pane of non-late data has been emitted, to see more
 * interesting results.
 * {@code SELECT * FROM enter_table_name WHERE trigger_type = "default" ORDER BY window DESC}
  *
- * <p> To see the late data i.e. dropped by the default trigger,
+ * <p>To see the late data i.e. dropped by the default trigger,
 * {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "withAllowedLateness" and
 * (timing = "LATE" or timing = "ON_TIME") and freeway = "5" ORDER BY window DESC, processing_time}
  *
@@ -103,23 +103,23 @@ import org.joda.time.Instant;
 * (trigger_type = "withAllowedLateness" or trigger_type = "sequential") and freeway = "5" ORDER BY
  * window DESC, processing_time}
  *
- * <p> To see speculative results every minute,
+ * <p>To see speculative results every minute,
 * {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "speculative" and freeway = "5"
  * ORDER BY window DESC, processing_time}
  *
- * <p> To see speculative results every five minutes after the end of the window
+ * <p>To see speculative results every five minutes after the end of the window
 * {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "sequential" and timing != "EARLY"
  * and freeway = "5" ORDER BY window DESC, processing_time}
  *
- * <p> To see the first and the last pane for a freeway in a window for all the trigger types,
+ * <p>To see the first and the last pane for a freeway in a window for all the trigger types,
 * {@code SELECT * FROM <enter_table_name> WHERE (isFirst = true or isLast = true) ORDER BY window}
  *
- * <p> To reduce the number of results for each query we can add additional where clauses.
+ * <p>To reduce the number of results for each query we can add additional where clauses.
 * For examples, To see the results of the default trigger,
 * {@code SELECT * FROM <enter_table_name> WHERE trigger_type = "default" AND freeway = "5" AND
  * window = "<enter_window_interval>"}
  *
- * <p> The example will try to cancel the pipelines on the signal to terminate the process (CTRL-C)
+ * <p>The example will try to cancel the pipelines on the signal to terminate the process (CTRL-C)
  * and then exits.
  */
 
@@ -153,13 +153,13 @@ public class TriggerExample {
    * 5             | 60                 | 10:27:20   | 10:27:25
    * 5             | 60                 | 10:29:00   | 11:11:00
    *
-   * <p> Beam tracks a watermark which records up to what point in event time the data is
+   * <p>Beam tracks a watermark which records up to what point in event time the data is
   * complete. For the purposes of the example, we'll assume the watermark is approximately 15m
   * behind the current processing time. In practice, the actual value would vary over time based
   * on the systems knowledge of the current delay and contents of the backlog (data
   * that has not yet been processed).
   *
-   * <p> If the watermark is 15m behind, then the window [10:00:00, 10:30:00) (in event time) would
+   * <p>If the watermark is 15m behind, then the window [10:00:00, 10:30:00) (in event time) would
    * close at 10:44:59, when the watermark passes 10:30:00.
    */
   static class CalculateTotalFlow

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
index f9957eb..e39a9ff 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
@@ -62,21 +62,21 @@ import org.slf4j.LoggerFactory;
  * New concepts: session windows and finding session duration; use of both
  * singleton and non-singleton side inputs.
  *
- * <p> This pipeline builds on the {@link LeaderBoard} functionality, and adds some "business
+ * <p>This pipeline builds on the {@link LeaderBoard} functionality, and adds some "business
 * intelligence" analysis: abuse detection and usage patterns. The pipeline derives the Mean user
 * score sum for a window, and uses that information to identify likely spammers/robots. (The robots
 * have a higher click rate than the human users). The 'robot' users are then filtered out when
  * calculating the team scores.
  *
- * <p> Additionally, user sessions are tracked: that is, we find bursts of user activity using
+ * <p>Additionally, user sessions are tracked: that is, we find bursts of user activity using
 * session windows. Then, the mean session duration information is recorded in the context of
 * subsequent fixed windowing. (This could be used to tell us what games are giving us greater
  * user retention).
  *
- * <p> Run {@code org.apache.beam.examples.complete.game.injector.Injector} to generate
+ * <p>Run {@code org.apache.beam.examples.complete.game.injector.Injector} to generate
 * pubsub data for this pipeline. The {@code Injector} documentation provides more detail.
  *
- * <p> To execute this pipeline using the Dataflow service, specify the pipeline configuration
+ * <p>To execute this pipeline using the Dataflow service, specify the pipeline configuration
  * like this:
  * <pre>{@code
  *   --project=YOUR_PROJECT_ID

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
index cf13899..1231c91 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
@@ -44,7 +44,7 @@ import org.joda.time.format.DateTimeFormatter;
 * domain, following {@link UserScore}. In addition to the concepts introduced in {@link UserScore},
 * new concepts include: windowing and element timestamps; use of {@code Filter.by()}.
  *
- * <p> This pipeline processes data collected from gaming events in batch, building on {@link
+ * <p>This pipeline processes data collected from gaming events in batch, building on {@link
 * UserScore} but using fixed windows. It calculates the sum of scores per team, for each window,
 * optionally allowing specification of two timestamps before and after which data is filtered out.
 * This allows a model where late data collected after the intended analysis window can be included,
@@ -53,7 +53,7 @@ import org.joda.time.format.DateTimeFormatter;
 * {@link UserScore} pipeline. However, our batch processing is high-latency, in that we don't get
 * results from plays at the beginning of the batch's time period until the batch is processed.
  *
- * <p> To execute this pipeline using the Dataflow service, specify the pipeline configuration
+ * <p>To execute this pipeline using the Dataflow service, specify the pipeline configuration
  * like this:
  * <pre>{@code
  *   --project=YOUR_PROJECT_ID
@@ -64,7 +64,7 @@ import org.joda.time.format.DateTimeFormatter;
  * </pre>
  * where the BigQuery dataset you specify must already exist.
  *
- * <p> Optionally include {@code --input} to specify the batch input file path.
+ * <p>Optionally include {@code --input} to specify the batch input file path.
  * To indicate a time after which the data should be filtered out, include the
 * {@code --stopMin} arg. E.g., {@code --stopMin=2015-10-18-23-59} indicates that any data
 * timestamped after 23:59 PST on 2015-10-18 should not be included in the analysis.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
index 13bbf44..18a5aa1 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
@@ -57,26 +57,26 @@ import org.joda.time.format.DateTimeFormatter;
 * early/speculative results; using .accumulatingFiredPanes() to do cumulative processing of late-
  * arriving data.
  *
- * <p> This pipeline processes an unbounded stream of 'game events'. The calculation of the team
+ * <p>This pipeline processes an unbounded stream of 'game events'. The calculation of the team
 * scores uses fixed windowing based on event time (the time of the game play event), not
 * processing time (the time that an event is processed by the pipeline). The pipeline calculates
 * the sum of scores per team, for each window. By default, the team scores are calculated using
  * one-hour windows.
  *
- * <p> In contrast-- to demo another windowing option-- the user scores are calculated using a
+ * <p>In contrast-- to demo another windowing option-- the user scores are calculated using a
 * global window, which periodically (every ten minutes) emits cumulative user score sums.
  *
- * <p> In contrast to the previous pipelines in the series, which used static, finite input data,
+ * <p>In contrast to the previous pipelines in the series, which used static, finite input data,
 * here we're using an unbounded data source, which lets us provide speculative results, and allows
 * handling of late data, at much lower latency. We can use the early/speculative results to keep a
 * 'leaderboard' updated in near-realtime. Our handling of late data lets us generate correct
 * results, e.g. for 'team prizes'. We're now outputting window results as they're
 * calculated, giving us much lower latency than with the previous batch examples.
  *
- * <p> Run {@link injector.Injector} to generate pubsub data for this pipeline.  The Injector
+ * <p>Run {@link injector.Injector} to generate pubsub data for this pipeline.  The Injector
  * documentation provides more detail on how to do this.
  *
- * <p> To execute this pipeline using the Dataflow service, specify the pipeline configuration
+ * <p>To execute this pipeline using the Dataflow service, specify the pipeline configuration
  * like this:
  * <pre>{@code
  *   --project=YOUR_PROJECT_ID

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
index f05879f..fc4e7f3 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
@@ -48,15 +48,15 @@ import org.slf4j.LoggerFactory;
 * BigQuery; using standalone DoFns; use of the sum by key transform; examples of
  * Java 8 lambda syntax.
  *
- * <p> In this gaming scenario, many users play, as members of different teams, over the course of a
+ * <p>In this gaming scenario, many users play, as members of different teams, over the course of a
 * day, and their actions are logged for processing.  Some of the logged game events may be late-
 * arriving, if users play on mobile devices and go transiently offline for a period.
  *
- * <p> This pipeline does batch processing of data collected from gaming events. It calculates the
+ * <p>This pipeline does batch processing of data collected from gaming events. It calculates the
 * sum of scores per user, over an entire batch of gaming data (collected, say, for each day). The
 * batch processing will not include any late data that arrives after the day's cutoff point.
  *
- * <p> To execute this pipeline using the Dataflow service and static example input data, specify
+ * <p>To execute this pipeline using the Dataflow service and static example input data, specify
  * the pipeline configuration like this:
  * <pre>{@code
  *   --project=YOUR_PROJECT_ID
@@ -67,7 +67,7 @@ import org.slf4j.LoggerFactory;
  * </pre>
  * where the BigQuery dataset you specify must already exist.
  *
- * <p> Optionally include the --input argument to specify a batch input file.
+ * <p>Optionally include the --input argument to specify a batch input file.
 * See the --input default value for example batch data file, or use {@link injector.Injector} to
  * generate your own batch data.
   */

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
----------------------------------------------------------------------
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
index 8f8bd9f..8c23cd7 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/injector/Injector.java
@@ -40,22 +40,22 @@ import org.joda.time.format.DateTimeFormatter;
 * This is a generator that simulates usage data from a mobile game, and either publishes the data
  * to a pubsub topic or writes it to a file.
  *
- * <p> The general model used by the generator is the following. There is a set of teams with team
+ * <p>The general model used by the generator is the following. There is a set of teams with team
 * members. Each member is scoring points for their team. After some period, a team will dissolve
 * and a new one will be created in its place. There is also a set of 'Robots', or spammer users.
 * They hop from team to team. The robots are set to have a higher 'click rate' (generate more
  * events) than the regular team members.
  *
- * <p> Each generated line of data has the following form:
+ * <p>Each generated line of data has the following form:
  * username,teamname,score,timestamp_in_ms,readable_time
  * e.g.:
  * user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
  *
- * <p> The Injector writes either to a PubSub topic, or a file. It will use the PubSub topic if
+ * <p>The Injector writes either to a PubSub topic, or a file. It will use the PubSub topic if
  * specified. It takes the following arguments:
  * {@code Injector project-name (topic-name|none) (filename|none)}.
  *
- * <p> To run the Injector in the mode where it publishes to PubSub, you will need to authenticate
+ * <p>To run the Injector in the mode where it publishes to PubSub, you will need to authenticate
 * locally using project-based service account credentials to avoid running over PubSub
 * quota.
 * See https://developers.google.com/identity/protocols/application-default-credentials
@@ -74,7 +74,7 @@ import org.joda.time.format.DateTimeFormatter;
  * </pre>
  * The pubsub topic will be created if it does not exist.
  *
- * <p> To run the injector in write-to-file-mode, set the topic name to "none" and specify the
+ * <p>To run the injector in write-to-file-mode, set the topic name to "none" and specify the
  * filename:
  * <pre>{@code
  * Injector <project-name> none <filename>

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
----------------------------------------------------------------------
diff --git a/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java b/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
index f4c8eea..ac64969 100644
--- a/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
+++ b/runners/core-java/src/main/java/org/apache/beam/runners/core/DoFnRunner.java
@@ -52,7 +52,7 @@ public interface DoFnRunner<InputT, OutputT> {
     /**
      * Gets this object as a {@link OldDoFn}.
      *
-     * Most implementors of this interface are expected to be {@link OldDoFn} instances, and will
+     * <p>Most implementors of this interface are expected to be {@link OldDoFn} instances, and will
      * return themselves.
      */
     OldDoFn<KeyedWorkItem<K, InputT>, KV<K, OutputT>> asDoFn();

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
index 2d2b87d..8cec8f7 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectExecutionContext.java
@@ -28,7 +28,7 @@ import org.apache.beam.sdk.util.state.CopyOnAccessInMemoryStateInternals;
 /**
  * Execution Context for the {@link DirectRunner}.
  *
- * This implementation is not thread safe. A new {@link DirectExecutionContext} must be created
+ * <p>This implementation is not thread safe. A new {@link DirectExecutionContext} must be created
  * for each thread that requires it.
  */
 class DirectExecutionContext

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
index abcc57b..224101a 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/DirectRunner.java
@@ -158,11 +158,10 @@ public class DirectRunner
     * Return a new {@link CommittedBundle} that is like this one, except calls to
     * {@link #getElements()} will return the provided elements. This bundle is unchanged.
      *
-     * <p>
-     * The value of the {@link #getSynchronizedProcessingOutputWatermark() synchronized processing
-     * output watermark} of the returned {@link CommittedBundle} is equal to the value returned from
-     * the current bundle. This is used to ensure a {@link PTransform} that could not complete
-     * processing on input elements properly holds the synchronized processing time to the
+     * <p>The value of the {@link #getSynchronizedProcessingOutputWatermark() synchronized
+     * processing output watermark} of the returned {@link CommittedBundle} is equal to the value
+     * returned from the current bundle. This is used to ensure a {@link PTransform} that could not
+     * complete processing on input elements properly holds the synchronized processing time to the
      * appropriate value.
      */
     CommittedBundle<T> withElements(Iterable<WindowedValue<T>> elements);
@@ -322,7 +321,7 @@ public class DirectRunner
   /**
    * The result of running a {@link Pipeline} with the {@link DirectRunner}.
    *
-   * Throws {@link UnsupportedOperationException} for all methods.
+   * <p>Throws {@link UnsupportedOperationException} for all methods.
    */
   public static class DirectPipelineResult implements PipelineResult {
     private final PipelineExecutor executor;
@@ -389,7 +388,7 @@ public class DirectRunner
     * {@link DirectOptions#isShutdownUnboundedProducersWithMaxWatermark()} set to false,
      * this method will never return.
      *
-     * See also {@link PipelineExecutor#awaitCompletion()}.
+     * <p>See also {@link PipelineExecutor#awaitCompletion()}.
      */
     public State awaitCompletion() throws Throwable {
       if (!state.isTerminal()) {

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
index 52c45c3..567def2 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ExecutorServiceParallelExecutor.java
@@ -305,7 +305,7 @@ final class ExecutorServiceParallelExecutor implements PipelineExecutor {
   /**
    * An internal status update on the state of the executor.
    *
-   * Used to signal when the executor should be shut down (due to an exception).
+   * <p>Used to signal when the executor should be shut down (due to an exception).
    */
   @AutoValue
   abstract static class ExecutorUpdate {

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
index 6c8e48b..1624fcb 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformEvaluator.java
@@ -37,7 +37,7 @@ public interface TransformEvaluator<InputT> {
   /**
    * Finish processing the bundle of this {@link TransformEvaluator}.
    *
-   * After {@link #finishBundle()} is called, the {@link TransformEvaluator} will not be reused,
+   * <p>After {@link #finishBundle()} is called, the {@link TransformEvaluator} will not be reused,
    * and no more elements will be processed.
    *
    * @return an {@link TransformResult} containing the results of this bundle 
evaluation.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
index 0b08294..ba2d48e 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/TransformResult.java
@@ -59,7 +59,7 @@ public interface TransformResult {
   /**
   * Returns the Watermark Hold for the transform at the time this result was produced.
    *
-   * If the transform does not set any watermark hold, returns
+   * <p>If the transform does not set any watermark hold, returns
    * {@link BoundedWindow#TIMESTAMP_MAX_VALUE}.
    */
   Instant getWatermarkHold();
@@ -67,7 +67,7 @@ public interface TransformResult {
   /**
    * Returns the State used by the transform.
    *
-   * If this evaluation did not access state, this may return null.
+   * <p>If this evaluation did not access state, this may return null.
    */
   @Nullable
   CopyOnAccessInMemoryStateInternals<?> getState();

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
index a4e8d6f..43a1225 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/ViewEvaluatorFactory.java
@@ -139,7 +139,7 @@ class ViewEvaluatorFactory implements TransformEvaluatorFactory {
   /**
   * An in-process implementation of the {@link CreatePCollectionView} primitive.
    *
-   * This implementation requires the input {@link PCollection} to be an iterable
+   * <p>This implementation requires the input {@link PCollection} to be an iterable
    * of {@code WindowedValue<ElemT>}, which is provided
    * to {@link PCollectionView#getViewFn()} for conversion to {@link ViewT}.
    */

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
----------------------------------------------------------------------
diff --git a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
index 4a3108c..82a6e4f 100644
--- a/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
+++ b/runners/direct-java/src/main/java/org/apache/beam/runners/direct/WatermarkManager.java
@@ -168,7 +168,7 @@ public class WatermarkManager {
     /**
     * Returns the {@link WatermarkUpdate} that is a result of combining the two watermark updates.
      *
-     * If either of the input {@link WatermarkUpdate WatermarkUpdates} were advanced, the result
+     * <p>If either of the input {@link WatermarkUpdate WatermarkUpdates} were advanced, the result
      * {@link WatermarkUpdate} has been advanced.
      */
     public WatermarkUpdate union(WatermarkUpdate that) {
@@ -634,7 +634,7 @@ public class WatermarkManager {
   * latestTime argument and put in in the result with the same key, then remove all of the keys
    * which have no more pending timers.
    *
-   * The result collection retains ordering of timers (from earliest to latest).
+   * <p>The result collection retains ordering of timers (from earliest to latest).
    */
   private static Map<StructuralKey<?>, List<TimerData>> extractFiredTimers(
       Instant latestTime, Map<StructuralKey<?>, NavigableSet<TimerData>> objectTimers) {

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
----------------------------------------------------------------------
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
index a92d339..6d04e0b 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
@@ -65,9 +65,9 @@ import org.slf4j.LoggerFactory;
 /**
 * An example that computes a basic TF-IDF search table for a directory or GCS prefix.
  *
- * <p> Concepts: joining data; side inputs; logging
+ * <p>Concepts: joining data; side inputs; logging
  *
- * <p> To execute this pipeline locally, specify general pipeline configuration:
+ * <p>To execute this pipeline locally, specify general pipeline configuration:
  * <pre>{@code
  *   --project=YOUR_PROJECT_ID
  * }</pre>
@@ -76,7 +76,7 @@ import org.slf4j.LoggerFactory;
  *   --output=[YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
  * }</pre>
  *
- * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
+ * <p>To execute this pipeline using the Dataflow service, specify pipeline configuration:
  * <pre>{@code
  *   --project=YOUR_PROJECT_ID
  *   --stagingLocation=gs://YOUR_STAGING_DIRECTORY
@@ -85,14 +85,14 @@ import org.slf4j.LoggerFactory;
  *   --output=gs://YOUR_OUTPUT_PREFIX
  * }</pre>
  *
- * <p> The default input is {@code gs://dataflow-samples/shakespeare/} and can be overridden with
+ * <p>The default input is {@code gs://dataflow-samples/shakespeare/} and can be overridden with
  * {@code --input}.
  */
 public class TFIDF {
   /**
    * Options supported by {@link TFIDF}.
-   * <p>
-   * Inherits standard configuration options.
+   *
+   * <p>Inherits standard configuration options.
    */
   private interface Options extends PipelineOptions, FlinkPipelineOptions {
    @Description("Path to the directory or GCS prefix containing files to read from")

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
----------------------------------------------------------------------
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
index 9cce757..c816442 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/WordCount.java
@@ -96,8 +96,8 @@ public class WordCount {
 
   /**
    * Options supported by {@link WordCount}.
-   * <p>
-   * Inherits standard configuration options.
+   *
+   * <p>Inherits standard configuration options.
    */
   public interface Options extends PipelineOptions, FlinkPipelineOptions {
     @Description("Path of the file to read from")

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
----------------------------------------------------------------------
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
index 4636e3f..97ba232 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
@@ -155,7 +155,7 @@ public class AutoComplete {
   /**
    * Cheaper but higher latency.
    *
-   * <p> Returns two PCollections, the first is top prefixes of size greater
+   * <p>Returns two PCollections, the first is top prefixes of size greater
    * than minPrefix, and the second is top prefixes of size exactly
    * minPrefix.
    */
@@ -362,7 +362,7 @@ public class AutoComplete {
   /**
    * Options supported by this class.
    *
-   * <p> Inherits standard Dataflow configuration options.
+   * <p>Inherits standard Dataflow configuration options.
    */
   private interface Options extends WindowedWordCount.StreamingWordCountOptions {
     @Description("Whether to use the recursive algorithm")

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
index a067e76..be99f29 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
@@ -35,10 +35,10 @@ public interface FlinkPipelineOptions
 
   /**
    * List of local files to make available to workers.
-   * <p>
-   * Jars are placed on the worker's classpath.
-   * <p>
-   * The default value is the list of jars from the main program's classpath.
+   *
+   * <p>Jars are placed on the worker's classpath.
+   *
+   * <p>The default value is the list of jars from the main program's classpath.
    */
   @Description("Jar-Files to send to all workers and put on the classpath. "
       + "The default value is all files from the classpath.")

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
index 137fdeb..932952d 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunner.java
@@ -65,7 +65,6 @@ import org.slf4j.LoggerFactory;
  * A {@link PipelineRunner} that executes the operations in the
 * pipeline by first translating them to a Flink Plan and then executing them either locally
  * or on a Flink cluster, depending on the configuration.
- * <p>
  */
 public class FlinkRunner extends PipelineRunner<FlinkRunnerResult> {
 

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
index 0e4b513..681459a 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkRunnerRegistrar.java
@@ -27,10 +27,10 @@ import org.apache.beam.sdk.runners.PipelineRunnerRegistrar;
 
 
 /**
- * AuteService registrar - will register FlinkRunner and FlinkOptions
+ * AutoService registrar - will register FlinkRunner and FlinkOptions
  * as possible pipeline runner services.
  *
- * It ends up in META-INF/services and gets picked up by Dataflow.
+ * <p>It ends up in META-INF/services and gets picked up by Beam.
  *
  */
 public class FlinkRunnerRegistrar {

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
index 9cc84ca..810609e 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/functions/FlinkMultiOutputDoFnFunction.java
@@ -34,7 +34,7 @@ import org.apache.flink.util.Collector;
  * Encapsulates a {@link OldDoFn} that can emit to multiple
 * outputs inside a Flink {@link org.apache.flink.api.common.functions.RichMapPartitionFunction}.
  *
- * We get a mapping from {@link org.apache.beam.sdk.values.TupleTag} to output index
+ * <p>We get a mapping from {@link org.apache.beam.sdk.values.TupleTag} to output index
 * and must tag all outputs with the output number. Afterwards a filter will filter out
  * those elements that are not to be in a specific output.
  */

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
----------------------------------------------------------------------
diff --git a/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java b/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
index 9f7bc00..d6240c4 100644
--- a/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
+++ b/runners/flink/runner/src/test/java/org/apache/beam/runners/flink/FlinkTestPipeline.java
@@ -31,7 +31,7 @@ public class FlinkTestPipeline extends Pipeline {
   /**
    * Creates and returns a new test pipeline for batch execution.
    *
-   * <p> Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
+   * <p>Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
    * {@link Pipeline#run} to execute the pipeline and check the tests.
    */
   public static FlinkTestPipeline createForBatch() {
@@ -41,7 +41,7 @@ public class FlinkTestPipeline extends Pipeline {
   /**
    * Creates and returns a new test pipeline for streaming execution.
    *
-   * <p> Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
+   * <p>Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
    * {@link Pipeline#run} to execute the pipeline and check the tests.
    *
    * @return The Test Pipeline
@@ -53,7 +53,7 @@ public class FlinkTestPipeline extends Pipeline {
   /**
    * Creates and returns a new test pipeline for streaming or batch execution.
    *
-   * <p> Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
+   * <p>Use {@link org.apache.beam.sdk.testing.PAssert} to add tests, then call
    * {@link Pipeline#run} to execute the pipeline and check the tests.
    *
   * @param streaming <code>True</code> for streaming mode, <code>False</code> for batch.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
index 64ac3ad..646a145 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/DataflowRunner.java
@@ -174,11 +174,12 @@ import org.slf4j.LoggerFactory;
 * to the Dataflow representation using the {@link DataflowPipelineTranslator} and then submitting
  * them to a Dataflow service for execution.
  *
- * <p><h3>Permissions</h3>
+ * <h3>Permissions</h3>
  *
- * When reading from a Dataflow source or writing to a Dataflow sink using {@code DataflowRunner},
- * the Google cloudservices account and the Google compute engine service account of the GCP project
- * running the Dataflow Job will need access to the corresponding source/sink.
+ * <p>When reading from a Dataflow source or writing to a Dataflow sink using
+ * {@code DataflowRunner}, the Google cloudservices account and the Google compute engine service
+ * account of the GCP project running the Dataflow Job will need access to the corresponding
+ * source/sink.
  *
 * <p>Please see <a href="https://cloud.google.com/dataflow/security-and-permissions">Google Cloud
  * Dataflow Security and Permissions</a> for more details.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
index d4f9a90..62d4aff 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/AssignWindows.java
@@ -30,14 +30,14 @@ import org.apache.beam.sdk.values.PCollection;
 * A primitive {@link PTransform} that implements the {@link Window#into(WindowFn)}
  * {@link PTransform}.
  *
- * For an application of {@link Window#into(WindowFn)} that changes the {@link WindowFn}, applies
+ * <p>For an application of {@link Window#into(WindowFn)} that changes the {@link WindowFn}, applies
  * a primitive {@link PTransform} in the Dataflow service.
  *
- * For an application of {@link Window#into(WindowFn)} that does not change the {@link WindowFn},
+ * <p>For an application of {@link Window#into(WindowFn)} that does not change the {@link WindowFn},
 * applies an identity {@link ParDo} and sets the windowing strategy of the output
  * {@link PCollection}.
  *
- * For internal use only.
+ * <p>For internal use only.
  *
  * @param <T> the type of input element
  */

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
index bb8daf3..903e7b4 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/internal/IsmFormat.java
@@ -294,7 +294,7 @@ public class IsmFormat {
     /**
      * Computes the shard id for the given key component(s).
      *
-     * The shard keys are encoded into their byte representations and hashed using the
+     * <p>The shard keys are encoded into their byte representations and hashed using the
      * <a href="http://smhasher.googlecode.com/svn/trunk/MurmurHash3.cpp">
      * 32-bit murmur3 algorithm, x86 variant</a> (little-endian variant),
      * using {@code 1225801234} as the seed value. We ensure that shard ids for
@@ -307,7 +307,7 @@ public class IsmFormat {
     /**
      * Computes the shard id for the given key component(s).
      *
-     * Mutates {@code keyBytes} such that when returned, contains the encoded
+     * <p>Mutates {@code keyBytes} such that when returned, contains the encoded
      * version of the key components.
      */
     public <V, T> int encodeAndHash(List<?> keyComponents, RandomAccessData keyBytesToMutate) {
@@ -317,7 +317,7 @@ public class IsmFormat {
     /**
      * Computes the shard id for the given key component(s).
      *
-     * Mutates {@code keyBytes} such that when returned, contains the encoded
+     * <p>Mutates {@code keyBytes} such that when returned, contains the encoded
      * version of the key components. Also, mutates {@code keyComponentByteOffsetsToMutate} to
      * store the location where each key component's encoded byte representation ends within
      * {@code keyBytes}.
@@ -619,7 +619,7 @@ public class IsmFormat {
   /**
    * A coder for {@link IsmShard}s.
    *
-   * The shard descriptor is encoded as:
+   * <p>The shard descriptor is encoded as:
    * <ul>
    *   <li>id (variable length integer encoding)</li>
    *   <li>blockOffset (variable length long encoding)</li>

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
index dfe538d..8501f9f 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
@@ -186,8 +186,7 @@ public interface DataflowPipelineDebugOptions extends PipelineOptions {
   * thrashing or out of memory. The location of the heap file will either be echoed back
   * to the user, or the user will be given the opportunity to download the heap file.
    *
-   * <p>
-   * CAUTION: Heap dumps can of comparable size to the default boot disk. Consider increasing
+   * <p>CAUTION: Heap dumps can of comparable size to the default boot disk. Consider increasing
    * the boot disk size before setting this flag to true.
    */
   @Description("If {@literal true}, save a heap dump before killing a thread or process "

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
----------------------------------------------------------------------
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
index 683e16b..84c53ea 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/util/RandomAccessData.java
@@ -44,8 +44,8 @@ import org.apache.beam.sdk.util.VarInt;
 * also provides random access to bytes stored within. This wrapper allows users to finely
  * control the number of byte copies that occur.
  *
- * Anything stored within the in-memory buffer from offset {@link #size()} is considered temporary
- * unused storage.
+ * <p>Anything stored within the in-memory buffer from offset {@link #size()} is considered
+ * temporary unused storage.
  */
 @NotThreadSafe
 public class RandomAccessData {
@@ -54,7 +54,7 @@ public class RandomAccessData {
    * This follows the same encoding scheme as {@link ByteArrayCoder}.
    * This coder is deterministic and consistent with equals.
    *
-   * This coder does not support encoding positive infinity.
+   * <p>This coder does not support encoding positive infinity.
    */
  public static class RandomAccessDataCoder extends AtomicCoder<RandomAccessData> {
    private static final RandomAccessDataCoder INSTANCE = new RandomAccessDataCoder();
@@ -192,7 +192,7 @@ public class RandomAccessData {
   * is strictly greater than this. Note that if this is empty or is all 0xFF then
   * a token value of positive infinity is returned.
   *
-   * The {@link UnsignedLexicographicalComparator} supports comparing {@link RandomAccessData}
+   * <p>The {@link UnsignedLexicographicalComparator} supports comparing {@link RandomAccessData}
   * with support for positive infinity.
    */
   public RandomAccessData increment() throws IOException {
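
The increment semantics above amount to unsigned arithmetic over big-endian bytes. A
minimal sketch of the idea — not the Beam implementation, and the helper below is a
hypothetical name:

    // Returns a byte string strictly greater (in unsigned lexicographic
    // order) than the first `size` bytes, or null to signal positive
    // infinity when the input is empty or all 0xFF.
    static byte[] incrementCopy(byte[] bytes, int size) {
      byte[] copy = java.util.Arrays.copyOf(bytes, size);
      for (int i = size - 1; i >= 0; i--) {
        if (copy[i] != (byte) 0xFF) {
          copy[i]++;        // bump the last byte that can grow
          return copy;      // strictly greater than the input
        }
      }
      return null;          // empty or all 0xFF: positive infinity
    }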

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
----------------------------------------------------------------------
diff --git 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
index 3888ec2..188479c 100644
--- a/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
+++ b/runners/spark/src/main/java/org/apache/beam/runners/spark/SparkRunner.java
@@ -59,7 +59,7 @@ import org.slf4j.LoggerFactory;
  * EvaluationResult result = SparkRunner.create().run(p);
  * }
  *
- * To create a pipeline runner to run against a different spark cluster, with a custom master url
+ * <p>To create a pipeline runner to run against a different Spark cluster, with a custom master URL,
  * we would do the following:
  *
  * {@code
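
(The {@code ...} example above is truncated by the diff context.) For orientation, a
hedged sketch of such a setup; the options methods are assumptions based on this era's
SparkPipelineOptions and may not match the API exactly:

    // Sketch: target a specific Spark master instead of the default.
    SparkPipelineOptions options =
        PipelineOptionsFactory.as(SparkPipelineOptions.class);
    options.setSparkMaster("spark://host:7077");  // assumed setter name
    options.setRunner(SparkRunner.class);
    Pipeline p = Pipeline.create(options);
    // ...apply transforms...
    p.run();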

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
----------------------------------------------------------------------
diff --git 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
index a1e5918..6ad6556 100644
--- 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
+++ 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/TestSparkRunner.java
@@ -40,8 +40,8 @@ import org.apache.beam.sdk.values.POutput;
  * EvaluationResult result = SparkRunner.create().run(p);
  * }
  *
- * To create a pipeline runner to run against a different spark cluster, with a custom master url we
- * would do the following:
+ * <p>To create a pipeline runner to run against a different Spark cluster, with a custom master URL,
+ * we would do the following:
  *
  * {@code
  * Pipeline p = [logic for pipeline creation]

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
----------------------------------------------------------------------
diff --git 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
index b40e022..e63c660 100644
--- 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
+++ 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/coders/WritableCoder.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.Writable;
 /**
 * A {@code WritableCoder} is a {@link Coder} for a Java class that implements {@link Writable}.
  *
- * <p> To use, specify the coder type on a PCollection:
+ * <p>To use, specify the coder type on a PCollection:
  * <pre>
  * {@code
  *   PCollection<MyRecord> records =
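
(The <pre> block above is truncated by the diff context.) The registration it
illustrates is the usual setCoder call, sketched here with a hypothetical MyRecord
class implementing Writable and a hypothetical producing DoFn:

    // Sketch: attach WritableCoder to a PCollection of Hadoop Writables.
    PCollection<MyRecord> records = input
        .apply(ParDo.of(new ProduceMyRecordsFn()))   // hypothetical DoFn
        .setCoder(WritableCoder.of(MyRecord.class));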

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
----------------------------------------------------------------------
diff --git 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
index 0a30f9f..d78b437 100644
--- 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
+++ 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/io/hadoop/ShardNameTemplateAware.java
@@ -24,7 +24,7 @@ package org.apache.beam.runners.spark.io.hadoop;
  * that they produce shard names that adhere to the template in
  * {@link HadoopIO.Write}.
  *
- * Some common shard names are defined in
+ * <p>Some common shard names are defined in
  * {@link org.apache.beam.sdk.io.ShardNameTemplate}.
  */
 public interface ShardNameTemplateAware {

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
----------------------------------------------------------------------
diff --git 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
index eb4002e..18926bc 100644
--- 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
+++ 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/GroupCombineFunctions.java
@@ -52,7 +52,7 @@ import scala.Tuple2;
  */
 public class GroupCombineFunctions {
 
-  /***
+  /**
    * Apply {@link GroupByKeyViaGroupByKeyOnly.GroupByKeyOnly} to a Spark RDD.
    */
  public static <K, V> JavaRDD<WindowedValue<KV<K, Iterable<V>>>> groupByKeyOnly(
@@ -71,7 +71,7 @@ public class GroupCombineFunctions {
         .map(WindowingHelpers.<KV<K, Iterable<V>>>windowFunction());
   }
 
-  /***
+  /**
   * Apply {@link GroupByKeyViaGroupByKeyOnly.GroupAlsoByWindow} to a Spark RDD.
    */
  public static <K, V, W extends BoundedWindow> JavaRDD<WindowedValue<KV<K, Iterable<V>>>>

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
----------------------------------------------------------------------
diff --git 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
index 9b156fe..9ad2a9e 100644
--- 
a/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
+++ 
b/runners/spark/src/main/java/org/apache/beam/runners/spark/translation/TranslationUtils.java
@@ -165,7 +165,7 @@ public final class TranslationUtils {
     }
   }
 
-  /***
+  /**
    * Create SideInputs as Broadcast variables.
    *
    * @param views   The {@link PCollectionView}s.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
----------------------------------------------------------------------
diff --git 
a/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
 
b/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
index 4a96690..05e9125 100644
--- 
a/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
+++ 
b/runners/spark/src/test/java/org/apache/beam/runners/spark/translation/streaming/RecoverFromCheckpointStreamingTest.java
@@ -61,8 +61,8 @@ import org.junit.rules.TemporaryFolder;
 /**
 * Tests DStream recovery from checkpoint - recreate the job and continue (from checkpoint).
 *
- * Tests Aggregators, which rely on Accumulators - Aggregators should be available, though state
- * is not preserved (Spark issue), so they start from initial value.
+ * <p>Tests Aggregators, which rely on Accumulators - Aggregators should be available, though
+ * state is not preserved (Spark issue), so they start from their initial value.
 * //TODO: after the runner supports recovering the state of Aggregators, update this test's
 * expected values for the recovered (second) run.
  */

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
----------------------------------------------------------------------
diff --git a/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml 
b/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
index ae4fcba..ca42652 100644
--- a/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
+++ b/sdks/java/build-tools/src/main/resources/beam/checkstyle.xml
@@ -153,6 +153,9 @@ page at http://checkstyle.sourceforge.net/config.html -->
       <property name="allowUndeclaredRTE" value="true"/>
     </module>
 
+    <!-- Check that paragraph tags are used correctly in Javadoc. -->
+    <module name="JavadocParagraph"/>
+
     <module name="JavadocType">
       <property name="scope" value="protected"/>
       <property name="severity" value="error"/>

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
index 6ef02aa..f7ce3c2 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
@@ -115,6 +115,7 @@ import org.apache.commons.compress.utils.CountingInputStream;
  * }</pre>
  *
  * <h3>Permissions</h3>
+ *
 * <p>Permission requirements depend on the {@link PipelineRunner} that is used to execute the
 * Dataflow job. Please refer to the documentation of corresponding {@link PipelineRunner}s for
 * more details.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
index 5fd7b8a..cd4d7db 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
@@ -85,11 +85,13 @@ public abstract class BoundedSource<T> extends Source<T> {
    * operations, such as progress estimation and dynamic work rebalancing.
    *
    * <h3>Boundedness</h3>
-   * <p>Once {@link #start} or {@link #advance} has returned false, neither will be called
+   *
+   * <p>Once {@link #start} or {@link #advance} has returned false, neither will be called
    * again on this object.
    *
    * <h3>Thread safety</h3>
-   * All methods will be run from the same thread except {@link #splitAtFraction},
+   *
+   * <p>All methods will be run from the same thread except {@link #splitAtFraction},
    * {@link #getFractionConsumed}, {@link #getCurrentSource}, {@link #getSplitPointsConsumed()},
    * and {@link #getSplitPointsRemaining()}, all of which can be called concurrently
    * from a different thread. There will not be multiple concurrent calls to
@@ -106,7 +108,8 @@ public abstract class BoundedSource<T> extends Source<T> {
    * {@link #getCurrentSource} which do not change between {@link #splitAtFraction} calls.
    *
    * <h3>Implementing {@link #splitAtFraction}</h3>
-   * In the course of dynamic work rebalancing, the method {@link #splitAtFraction}
+   *
+   * <p>In the course of dynamic work rebalancing, the method {@link #splitAtFraction}
    * may be called concurrently with {@link #advance} or {@link #start}. It is critical that
    * their interaction is implemented in a thread-safe way, otherwise data loss is possible.
    *
@@ -261,14 +264,17 @@ public abstract class BoundedSource<T> extends Source<T> {
      * (including items already read).
      *
      * <h3>Usage</h3>
+     *
      * <p>Reader subclasses can use this method for convenience to access unchanging properties of
      * the source being read. Alternatively, they can cache these properties in the constructor.
+     *
      * <p>The framework will call this method in the course of dynamic work rebalancing, e.g. after
      * a successful {@link BoundedSource.BoundedReader#splitAtFraction} call.
      *
      * <h3>Mutability and thread safety</h3>
-     * Remember that {@link Source} objects must always be immutable. However, the return value of
-     * this function may be affected by dynamic work rebalancing, happening asynchronously via
+     *
+     * <p>Remember that {@link Source} objects must always be immutable. However, the return value
+     * of this function may be affected by dynamic work rebalancing, happening asynchronously via
      * {@link BoundedSource.BoundedReader#splitAtFraction}, meaning it can return a different
      * {@link Source} object. However, the returned object itself will still be immutable.
      * Callers must take care not to rely on properties of the returned source that may be
@@ -276,7 +282,8 @@ public abstract class BoundedSource<T> extends Source<T> {
      * reading a file).
      *
      * <h3>Implementation</h3>
-     * For convenience, subclasses should usually return the most concrete subclass of
+     *
+     * <p>For convenience, subclasses should usually return the most concrete subclass of
      * {@link Source} possible.
      * In practice, the implementation of this method should nearly always be one of the following:
      * <ul>
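
(The list of typical implementations is cut off by the hunk context.) As a hedged
sketch of the covariant-return pattern this recommends — the types here are
hypothetical, not Beam classes:

    // A reader whose current source may be swapped by splitAtFraction on
    // another thread; getCurrentSource() narrows the return type.
    abstract class MySource extends BoundedSource<String> {}

    abstract class MyReader extends BoundedSource.BoundedReader<String> {
      private MySource current;               // guarded by `this`

      MyReader(MySource initial) { this.current = initial; }

      @Override
      public synchronized MySource getCurrentSource() {
        return current;                       // may differ between calls
      }
    }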

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
index ea95f2f..f571d50 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileBasedSink.java
@@ -185,6 +185,7 @@ public abstract class FileBasedSink<T> extends Sink<T> {
    * {@link FileBasedSink#fileNamingTemplate}.
    *
    * <h2>Temporary Bundle File Handling:</h2>
+   *
   * <p>{@link FileBasedSink.FileBasedWriteOperation#temporaryFileRetention} controls the behavior
   * for managing temporary files. By default, temporary files will be removed. Subclasses can
    * provide a different value to the constructor.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
index 6c685ff..6e49cc3 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/OffsetBasedSource.java
@@ -248,7 +248,9 @@ public abstract class OffsetBasedSource<T> extends BoundedSource<T> {
      * Returns the <i>starting</i> offset of the {@link Source.Reader#getCurrent current record},
      * which has been read by the last successful {@link Source.Reader#start} or
      * {@link Source.Reader#advance} call.
+     *
      * <p>If no such call has been made yet, the return value is unspecified.
+     *
      * <p>See {@link RangeTracker} for description of offset semantics.
      */
     protected abstract long getCurrentOffset() throws NoSuchElementException;

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
index d113457..2f9054f 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
@@ -59,6 +59,7 @@ import org.slf4j.LoggerFactory;
  * and consume unbounded {@link PCollection PCollections}.
  *
  * <h3>Permissions</h3>
+ *
 * <p>Permission requirements depend on the {@link PipelineRunner} that is used to execute the
  * Dataflow job. Please refer to the documentation of corresponding
  * {@link PipelineRunner PipelineRunners} for more details.

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
index 1abcc3d..3f49eac 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Sink.java
@@ -71,6 +71,7 @@ import org.apache.beam.sdk.values.PCollection;
  * </ul>
  *
  * <h2>WriteOperation</h2>
+ *
 * <p>{@link WriteOperation#initialize} and {@link WriteOperation#finalize} are conceptually called
 * once: at the beginning and end of a Write transform. However, implementors must ensure that these
 * methods are idempotent, as they may be called multiple times on different machines in the case of
@@ -91,6 +92,7 @@ import org.apache.beam.sdk.values.PCollection;
  * these mutations will not be visible in finalize).
  *
  * <h2>Bundle Ids:</h2>
+ *
 * <p>In order to ensure fault-tolerance, a bundle may be executed multiple times (e.g., in the
 * event of failure/retry or for redundancy). However, exactly one of these executions will have its
 * result passed to the WriteOperation's finalize method. Each call to {@link Writer#open} is passed
@@ -110,6 +112,7 @@ import org.apache.beam.sdk.values.PCollection;
 * of output file names that it can then merge or rename using some bundle naming scheme.
  *
  * <h2>Writer Results:</h2>
+ *
 * <p>{@link WriteOperation}s and {@link Writer}s must agree on a writer result type that will be
 * returned by a Writer after it writes a bundle. This type can be a client-defined object or an
 * existing type; {@link WriteOperation#getWriterResultCoder} should return a {@link Coder} for the

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
index 62d3ae8..6ec4533 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TextIO.java
@@ -107,6 +107,7 @@ import org.apache.beam.sdk.values.PDone;
  * }</pre>
  *
  * <h3>Permissions</h3>
+ *
 * <p>When run using the {@code DirectRunner}, your pipeline can read and write text files
 * on your local drive and remote text files on Google Cloud Storage that you have access to using
 * your {@code gcloud} credentials. When running in the Dataflow service, the pipeline can only
@@ -230,7 +231,7 @@ public class TextIO {
 
       /**
       * Returns a new transform for reading from text files that's like this one but
-       * that uses the given {@link Coder Coder<X>} to decode each of the
+       * that uses the given {@link Coder Coder&lt;X&gt;} to decode each of the
        * lines of the file into a value of type {@code X}.
        *
        * <p>Does not modify this object.
@@ -602,8 +603,8 @@ public class TextIO {
 
       /**
        * Returns a transform for writing to text files that's like this one
-       * but that uses the given {@link Coder Coder<X>} to encode each of
-       * the elements of the input {@link PCollection PCollection<X>} into an
+       * but that uses the given {@link Coder Coder&lt;X&gt;} to encode each of
+       * the elements of the input {@link PCollection PCollection&lt;X&gt;} into an
        * output text line. Does not modify this object.
        *
        * @param <X> the type of the elements of the input {@link PCollection}
@@ -853,7 +854,7 @@ public class TextIO {
     * A {@link org.apache.beam.sdk.io.FileBasedSource.FileBasedReader FileBasedReader}
      * which can decode records delimited by newline characters.
      *
-     * See {@link TextSource} for further details.
+     * <p>See {@link TextSource} for further details.
      */
     @VisibleForTesting
     static class TextBasedReader<T> extends FileBasedReader<T> {
@@ -985,7 +986,7 @@ public class TextIO {
       /**
        * Decodes the current element updating the buffer to only contain the 
unconsumed bytes.
        *
-       * This invalidates the currently stored {@code startOfSeparatorInBuffer} and
+       * <p>This invalidates the currently stored {@code startOfSeparatorInBuffer} and
        * {@code endOfSeparatorInBuffer}.
        */
       private void decodeCurrentElement() throws IOException {
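
As a usage note for the withCoder fixes above, a hedged sketch against this era's
TextIO API (the fluent shape follows the surrounding Javadoc; treat it as an
assumption, and the bucket path is a placeholder):

    // Sketch: read lines with an explicit element coder.
    PCollection<String> lines = p.apply(
        TextIO.Read.from("gs://some-bucket/input*.txt")
                   .withCoder(StringUtf8Coder.of()));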

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
----------------------------------------------------------------------
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
index 9d0beb7..e8b19d9 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/Write.java
@@ -69,7 +69,7 @@ import org.slf4j.LoggerFactory;
  * <p>Example usage with runner-controlled sharding:
  *
  * <pre>{@code p.apply(Write.to(new MySink(...)));}</pre>
-
+ *
  * <p>Example usage with a fixed number of shards:
  *
  * <pre>{@code p.apply(Write.to(new MySink(...)).withNumShards(3));}</pre>

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
index ad2f119..f352f01 100644
--- 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
+++ 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/RangeTracker.java
@@ -192,6 +192,7 @@ public interface RangeTracker<PositionT> {
   *   <li>Otherwise, updates the last-consumed position to {@code recordStart} and returns
   *   {@code true}.
   * </ul>
+   *
   * <p>This method MUST be called on all split point records. It may be called on every record.
    */
   boolean tryReturnRecordAt(boolean isAtSplitPoint, PositionT recordStart);
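
A hedged sketch of the claim-then-emit loop this contract implies; the Reader, Record,
and emit names are hypothetical placeholders, not Beam types:

    // Each record is claimed via the tracker before it is emitted; a
    // false return means the range was split away and reading must stop.
    void readLoop(RangeTracker<Long> tracker, Reader reader) {
      while (reader.hasNext()) {
        Record record = reader.next();
        if (!tracker.tryReturnRecordAt(record.isAtSplitPoint(), record.getStart())) {
          break;  // start position fell outside the (possibly split) range
        }
        emit(record);
      }
    }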

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
index 3e810e9..deb1cf4 100644
--- 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
+++ 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/PipelineOptions.java
@@ -93,7 +93,7 @@ import org.joda.time.format.DateTimeFormatter;
  *
  * <h2>Defining Your Own PipelineOptions</h2>
  *
- * Defining your own {@link PipelineOptions} is the way for you to make configuration
+ * <p>Defining your own {@link PipelineOptions} is the way for you to make configuration
 * options available for both local execution and execution via a {@link PipelineRunner}.
 * By having PipelineOptionsFactory as your command-line interpreter, you will provide
 * a standardized way for users to interact with your application via the command-line.
@@ -117,7 +117,7 @@ import org.joda.time.format.DateTimeFormatter;
  *
  * <h3>Restrictions</h3>
  *
- * Since PipelineOptions can be "cast" to multiple types dynamically using
+ * <p>Since PipelineOptions can be "cast" to multiple types dynamically using
 * {@link PipelineOptions#as(Class)}, a property must conform to the following set of restrictions:
 * <ul>
 *   <li>Any property with the same name must have the same return type for all derived
@@ -134,7 +134,7 @@ import org.joda.time.format.DateTimeFormatter;
  *
  * <h3>Annotations For PipelineOptions</h3>
  *
- * {@link Description @Description} can be used to annotate an interface or a getter
+ * <p>{@link Description @Description} can be used to annotate an interface or a getter
  * with useful information which is output when {@code --help}
  * is invoked via {@link PipelineOptionsFactory#fromArgs(String[])}.
  *
@@ -158,7 +158,7 @@ import org.joda.time.format.DateTimeFormatter;
  *
  * <h2>Registration Of PipelineOptions</h2>
  *
- * Registration of {@link PipelineOptions} by an application guarantees that the
+ * <p>Registration of {@link PipelineOptions} by an application guarantees that the
 * {@link PipelineOptions} is composable during execution of their {@link Pipeline} and
 * meets the restrictions listed above or will fail during registration. Registration
 * also lists the registered {@link PipelineOptions} when {@code --help}
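
(The sentence above is cut off by the hunk context.) Concretely, the define-and-parse
flow this Javadoc describes looks roughly like the following; the interface and
property names are illustrative:

    // A user-defined options interface; PipelineOptionsFactory wires up
    // --inputPath=... on the command line and includes the description
    // in --help output.
    public interface MyAppOptions extends PipelineOptions {
      @Description("Path to the input data")
      @Default.String("/tmp/input")
      String getInputPath();
      void setInputPath(String value);
    }

    MyAppOptions options =
        PipelineOptionsFactory.fromArgs(args).as(MyAppOptions.class);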

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
index aa6f500..c438a43 100644
--- 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
+++ 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/ProxyInvocationHandler.java
@@ -198,7 +198,7 @@ class ProxyInvocationHandler implements InvocationHandler, HasDisplayData {
    * Backing implementation for {@link PipelineOptions#as(Class)}.
    *
    * @param iface The interface that the returned object needs to implement.
-   * @return An object that implements the interface <T>.
+   * @return An object that implements the interface {@code <T>}.
    */
   synchronized <T extends PipelineOptions> T as(Class<T> iface) {
     checkNotNull(iface);

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
index bd44c48..1021b2f 100644
--- 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
+++ 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
@@ -1047,14 +1047,14 @@ class SerializableMatchers implements Serializable {
   * the {@link Matcher} returned by {@link SerializableSupplier#get() get()} when it is invoked
   * during matching (which may occur on another machine, such as a Dataflow worker).
    *
-   * <code>
+   * <pre>{@code
    * return fromSupplier(new SerializableSupplier<Matcher<T>>() {
   *     @Override
    *     public Matcher<T> get() {
    *       return new MyMatcherForT();
    *     }
    * });
-   * </code>
+   * }</pre>
    */
   public static <T> SerializableMatcher<T> fromSupplier(
       SerializableSupplier<Matcher<T>> supplier) {

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
index e38e1af..dd62aeb 100644
--- 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
+++ 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SourceTestUtils.java
@@ -285,7 +285,8 @@ public class SourceTestUtils {
   * Asserts that the {@code source}'s reader either fails to {@code splitAtFraction(fraction)}
   * after reading {@code numItemsToReadBeforeSplit} items, or succeeds in a way that is
   * consistent according to {@link #assertSplitAtFractionSucceedsAndConsistent}.
-   * <p> Returns SplitAtFractionResult.
+   *
+   * <p>Returns SplitAtFractionResult.
   */
  public static <T> SplitAtFractionResult assertSplitAtFractionBehavior(

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
index 656bd7b..ed3a253 100644
--- 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
+++ 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ApproximateQuantiles.java
@@ -207,8 +207,9 @@ public class ApproximateQuantiles {
    * </pre>
    *
    * <p>The default error bound is {@code 1 / N}, though in practice
-   * the accuracy tends to be much better.  <p>See
-   * {@link #create(int, Comparator, long, double)} for
+   * the accuracy tends to be much better.
+   *
+   * <p>See {@link #create(int, Comparator, long, double)} for
    * more information about the meaning of {@code epsilon}, and
    * {@link #withEpsilon} for a convenient way to adjust it.
    *

http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
----------------------------------------------------------------------
diff --git 
a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java 
b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
index a00dcba..e9216e1 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Combine.java
@@ -672,7 +672,7 @@ public class Combine {
   * An abstract subclass of {@link CombineFn} for implementing combiners that are more
   * easily and efficiently expressed as binary operations on <code>int</code>s.
    *
-   * <p> It uses {@code int[0]} as the mutable accumulator.
+   * <p>It uses {@code int[0]} as the mutable accumulator.
    */
  public abstract static class BinaryCombineIntegerFn extends CombineFn<Integer, int[], Integer> {
 
@@ -774,7 +774,7 @@ public class Combine {
   * An abstract subclass of {@link CombineFn} for implementing combiners that are more
   * easily and efficiently expressed as binary operations on <code>long</code>s.
    *
-   * <p> It uses {@code long[0]} as the mutable accumulator.
+   * <p>It uses {@code long[0]} as the mutable accumulator.
    */
  public abstract static class BinaryCombineLongFn extends CombineFn<Long, long[], Long> {
     /**
@@ -873,7 +873,7 @@ public class Combine {
   * An abstract subclass of {@link CombineFn} for implementing combiners that are more
   * easily and efficiently expressed as binary operations on <code>double</code>s.
    *
-   * <p> It uses {@code double[0]} as the mutable accumulator.
+   * <p>It uses {@code double[0]} as the mutable accumulator.
    */
  public abstract static class BinaryCombineDoubleFn extends CombineFn<Double, double[], Double> {
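
To ground the single-element-array accumulator note above: a hedged sketch of a
concrete combiner built on one of these classes. The abstract method names are my
reading of the class's contract, not verified against this revision:

    // Sketch: a max combiner over ints. `apply` folds two values;
    // `identity` supplies the neutral element for empty input.
    Combine.BinaryCombineIntegerFn maxFn = new Combine.BinaryCombineIntegerFn() {
      @Override
      public int apply(int left, int right) {
        return Math.max(left, right);
      }

      @Override
      public int identity() {
        return Integer.MIN_VALUE;  // neutral element for max
      }
    };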
 

