Posted to commits@hbase.apache.org by mi...@apache.org on 2015/06/16 06:18:20 UTC

[2/2] hbase git commit: Revert "Correct Javadoc generation errors"

Revert "Correct Javadoc generation errors"

This reverts commit 0a227b79d6c92443752a2a0c53514cb7ef02f61b.
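
(For context: a revert of this form is normally generated with git itself, e.g.

    git revert 0a227b79d6c92443752a2a0c53514cb7ef02f61b

which creates a new commit, 5e6373e8 below, applying the inverse of that commit's diff.)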


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5e6373e8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5e6373e8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5e6373e8

Branch: refs/heads/master
Commit: 5e6373e8ecfb99bc53860fb153dd4bc7772c911b
Parents: 4eb7993
Author: Misty Stanley-Jones <ms...@cloudera.com>
Authored: Tue Jun 16 14:15:37 2015 +1000
Committer: Misty Stanley-Jones <ms...@cloudera.com>
Committed: Tue Jun 16 14:18:09 2015 +1000

----------------------------------------------------------------------
 .../hbase/client/coprocessor/package-info.java  | 30 ++++++++++----------
 .../client/metrics/ServerSideScanMetrics.java   |  4 +--
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  4 +--
 .../org/apache/hadoop/hbase/CellComparator.java | 12 ++++----
 .../coprocessor/example/BulkDeleteEndpoint.java |  4 +--
 .../decode/PrefixTreeArraySearcher.java         |  2 +-
 .../encode/tokenize/TokenizerNode.java          |  5 +---
 .../codec/prefixtree/scanner/CellSearcher.java  |  8 +++---
 .../hbase/procedure2/store/ProcedureStore.java  |  3 +-
 .../protobuf/HBaseZeroCopyByteString.java       |  1 -
 .../hadoop/hbase/protobuf/ProtobufMagic.java    |  2 --
 .../hadoop/hbase/rest/client/RemoteAdmin.java   |  8 +++---
 .../hadoop/hbase/rest/model/ScannerModel.java   |  2 +-
 .../hbase/coprocessor/RegionObserver.java       |  2 +-
 .../apache/hadoop/hbase/http/HttpServer.java    |  6 ++--
 .../apache/hadoop/hbase/http/InfoServer.java    |  8 +++---
 .../hadoop/hbase/http/jmx/JMXJsonServlet.java   | 17 ++++-------
 .../hadoop/hbase/http/jmx/package-info.java     |  2 +-
 .../hadoop/hbase/http/lib/package-info.java     |  2 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java  |  5 ++--
 .../hbase/io/hfile/bucket/BucketAllocator.java  |  3 +-
 .../hbase/mapred/TableInputFormatBase.java      |  2 +-
 .../MultiTableSnapshotInputFormatImpl.java      |  6 ++--
 .../cleaner/BaseHFileCleanerDelegate.java       |  5 +---
 .../hbase/master/handler/TableEventHandler.java |  1 +
 .../master/snapshot/MasterSnapshotVerifier.java |  2 +-
 .../hadoop/hbase/regionserver/HRegion.java      |  1 +
 .../compactions/CompactionConfiguration.java    |  8 ++----
 .../regionserver/compactions/Compactor.java     |  2 +-
 .../hadoop/hbase/tool/WriteSinkCoprocessor.java | 19 +++++--------
 .../hadoop/hbase/util/BloomFilterChunk.java     |  2 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java |  2 +-
 .../hadoop/hbase/wal/DefaultWALProvider.java    |  4 +--
 .../apache/hadoop/hbase/wal/WALSplitter.java    |  6 ++--
 .../zookeeper/lock/ZKInterProcessLockBase.java  |  2 +-
 35 files changed, 84 insertions(+), 108 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
index c70f27f..8af120f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
@@ -20,10 +20,12 @@
 /**
 Provides client classes for invoking Coprocessor RPC protocols
 
+<p>
 <ul>
  <li><a href="#overview">Overview</a></li>
  <li><a href="#usage">Example Usage</a></li>
 </ul>
+</p>
 
 <h2><a name="overview">Overview</a></h2>
 <p>
@@ -36,7 +38,6 @@ protocols.
 <p>
 In order to provide a custom RPC protocol to clients, a coprocessor implementation
 must:
-</p>
 <ul>
  <li>Define a protocol buffer Service and supporting Message types for the RPC methods.
  See the
@@ -48,7 +49,6 @@ must:
  {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()}
  method should return a reference to the Endpoint's protocol buffer Service instance.
 </ul>
-<p>
 Clients may then call the defined service methods on coprocessor instances via
 the {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])},
 {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and
@@ -63,7 +63,6 @@ method invocations.  Since regions are seldom handled directly in client code
 and the region names may change over time, the coprocessor RPC calls use row keys
 to identify which regions should be used for the method invocations.  Clients
 can call coprocessor Service methods against either:
-</p>
 <ul>
  <li><strong>a single region</strong> - calling
    {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}
@@ -78,6 +77,7 @@ can call coprocessor Service methods against either:
    from the region containing the start row key to the region containing the end
    row key (inclusive), will we used as the RPC endpoints.</li>
 </ul>
+</p>
 
 <p><em>Note that the row keys passed as parameters to the <code>Table</code>
 methods are not passed directly to the coprocessor Service implementations.
@@ -135,12 +135,12 @@ public static abstract class RowCountService
     public abstract void getRowCount(
         com.google.protobuf.RpcController controller,
         org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
-        com.google.protobuf.RpcCallback&lt;org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse&gt; done);
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
 
     public abstract void getKeyValueCount(
         com.google.protobuf.RpcController controller,
         org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
-        com.google.protobuf.RpcCallback&lt;org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse&gt; done);
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
   }
 }
 </pre></blockquote></div>
@@ -163,13 +163,13 @@ use:
 Connection connection = ConnectionFactory.createConnection(conf);
 Table table = connection.getTable(TableName.valueOf("mytable"));
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
-Map&lt;byte[],Long&gt; results = table.coprocessorService(
+Map<byte[],Long> results = table.coprocessorService(
     ExampleProtos.RowCountService.class, // the protocol interface we're invoking
     null, null,                          // start and end row keys
-    new Batch.Call&lt;ExampleProtos.RowCountService,Long&gt;() {
+    new Batch.Call<ExampleProtos.RowCountService,Long>() {
         public Long call(ExampleProtos.RowCountService counter) throws IOException {
-          BlockingRpcCallback&lt;ExampleProtos.CountResponse&gt; rpcCallback =
-              new BlockingRpcCallback&lt;ExampleProtos.CountResponse&gt;();
+          BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
+              new BlockingRpcCallback<ExampleProtos.CountResponse>();
           counter.getRowCount(null, request, rpcCallback);
           ExampleProtos.CountResponse response = rpcCallback.get();
           return response.hasCount() ? response.getCount() : 0;
@@ -204,17 +204,17 @@ Connection connection = ConnectionFactory.createConnection(conf);
 Table table = connection.getTable(TableName.valueOf("mytable"));
 // combine row count and kv count for region
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
-Map&lt;byte[],Long&gt; results = table.coprocessorService(
+Map<byte[],Long> results = table.coprocessorService(
     ExampleProtos.RowCountService.class, // the protocol interface we're invoking
     null, null,                          // start and end row keys
-    new Batch.Call&lt;ExampleProtos.RowCountService,Pair&lt;Long,Long&gt;&gt;() {
+    new Batch.Call<ExampleProtos.RowCountService,Pair<Long,Long>>() {
        public Long call(ExampleProtos.RowCountService counter) throws IOException {
-         BlockingRpcCallback&lt;ExampleProtos.CountResponse&gt; rowCallback =
-             new BlockingRpcCallback&lt;ExampleProtos.CountResponse&gt;();
+         BlockingRpcCallback<ExampleProtos.CountResponse> rowCallback =
+             new BlockingRpcCallback<ExampleProtos.CountResponse>();
          counter.getRowCount(null, request, rowCallback);
 
-         BlockingRpcCallback&lt;ExampleProtos.CountResponse&gt; kvCallback =
-             new BlockingRpcCallback&lt;ExampleProtos.CountResponse&gt;();
+         BlockingRpcCallback<ExampleProtos.CountResponse> kvCallback =
+             new BlockingRpcCallback<ExampleProtos.CountResponse>();
          counter.getKeyValueCount(null, request, kvCallback);
 
          ExampleProtos.CountResponse rowResponse = rowCallback.get();
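
[The hunks above trade HTML entities (&lt;, &gt;) back for bare angle brackets in doc comments. Since javadoc emits HTML, bare brackets are read as tags; a minimal illustrative fragment, not from the HBase tree, showing the forms that do and do not survive doclint:

    import java.util.Collections;
    import java.util.Map;

    public class EscapingExample {
      /**
       * Renders correctly: {@code Map<byte[], Long>} (the code tag escapes its
       * contents), or the entity form Map&lt;byte[], Long&gt;. Writing the same
       * type with bare angle brackets in this comment would be parsed as an
       * HTML tag and rejected by doclint on JDK 8+.
       */
      public Map<byte[], Long> results() {
        return Collections.emptyMap();
      }
    }
]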

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
index 46b67d4..c971c73 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
@@ -33,7 +33,7 @@ import com.google.common.collect.ImmutableMap;
 @InterfaceStability.Evolving
 public class ServerSideScanMetrics {
   /**
-   * Hash to hold the String -&gt; Atomic Long mappings for each metric
+   * Hash to hold the String -> Atomic Long mappings for each metric
    */
   private final Map<String, AtomicLong> counters = new HashMap<String, AtomicLong>();
 
@@ -103,7 +103,7 @@ public class ServerSideScanMetrics {
   /**
    * Get all of the values since the last time this function was called. Calling this function will
    * reset all AtomicLongs in the instance back to 0.
-   * @return A Map of String -&gt; Long for metrics
+   * @return A Map of String -> Long for metrics
    */
   public Map<String, Long> getMetricsMap() {
     // Create a builder
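
[A hedged sketch of the reset-on-read behavior documented above; the counter field name is an assumption about the class's public surface:

    import java.util.Map;
    import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;

    public class MetricsPoller {
      public static void main(String[] args) {
        ServerSideScanMetrics metrics = new ServerSideScanMetrics();
        // countOfRowsScanned is assumed to be one of the class's
        // pre-registered public AtomicLong counters.
        metrics.countOfRowsScanned.addAndGet(42);
        Map<String, Long> first = metrics.getMetricsMap();  // contains the 42
        Map<String, Long> second = metrics.getMetricsMap(); // counters reset to 0
        System.out.println(first + " / " + second);
      }
    }
]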

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 1bcf1e6..a87fd47 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -270,8 +270,6 @@ public final class ProtobufUtil {
 
   /**
    * @param bytes Bytes to check.
-   * @param offset offset to start at
-   * @param len length to use
    * @return True if passed <code>bytes</code> has {@link ProtobufMagic#PB_MAGIC} for a prefix.
    */
   public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {
@@ -281,7 +279,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * @param bytes bytes to check
+   * @param bytes
    * @throws DeserializationException if we are missing the pb magic prefix
    */
   public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException {
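
[A minimal usage sketch for the two prefix checks above, assuming the 4-byte "PBUF" magic described in ProtobufMagic:

    import org.apache.hadoop.hbase.exceptions.DeserializationException;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

    public class MagicCheck {
      public static void main(String[] args) throws DeserializationException {
        // "PBUF" magic followed by an arbitrary protobuf payload.
        byte[] wire = new byte[] { 'P', 'B', 'U', 'F', 0x08, 0x01 };
        ProtobufUtil.expectPBMagicPrefix(wire); // throws if the prefix is missing
        // Non-throwing variant over a sub-range of the buffer.
        System.out.println(ProtobufUtil.isPBMagicPrefix(wire, 0, wire.length));
      }
    }
]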

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index 67941bc..2d0c940 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -70,8 +70,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
    * cell 
    * @param left
    * @param right
-   * @return an int greater than 0 if left &gt; than right
-   *                lesser than 0 if left &lt; than right
+   * @return an int greater than 0 if left > than right
+   *                lesser than 0 if left < than right
    *                equal to 0 if left is equal to right
    */
   public final int compareKeyIgnoresMvcc(Cell left, Cell right) {
@@ -512,8 +512,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
    * wrong but it is intentional. This way, newer timestamps are first
    * found when we iterate over a memstore and newer versions are the
    * first we trip over when reading from a store file.
-   * @return 1 if left's timestamp &lt; right's timestamp
-   *         -1 if left's timestamp &gt; right's timestamp
+   * @return 1 if left's timestamp < right's timestamp
+   *         -1 if left's timestamp > right's timestamp
    *         0 if both timestamps are equal
    */
   public static int compareTimestamps(final Cell left, final Cell right) {
@@ -601,8 +601,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
    * wrong but it is intentional. This way, newer timestamps are first
    * found when we iterate over a memstore and newer versions are the
    * first we trip over when reading from a store file.
-   * @return 1 if left timestamp &lt; right timestamp
-   *         -1 if left timestamp &gt; right timestamp
+   * @return 1 if left timestamp < right timestamp
+   *         -1 if left timestamp > right timestamp
    *         0 if both timestamps are equal
    */
   public static int compareTimestamps(final long ltimestamp, final long rtimestamp) {
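
[A worked reading of the contract above, under which newest timestamps sort first:

    import org.apache.hadoop.hbase.CellComparator;

    public class TimestampOrder {
      public static void main(String[] args) {
        // Larger (newer) left timestamp sorts before the right one.
        System.out.println(CellComparator.compareTimestamps(200L, 100L)); // -1
        System.out.println(CellComparator.compareTimestamps(100L, 200L)); //  1
        System.out.println(CellComparator.compareTimestamps(150L, 150L)); //  0
      }
    }
]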

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
index 1515dc1..93f98ac 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
@@ -66,7 +66,7 @@ import com.google.protobuf.Service;
  * deleted(even if Scan fetches many versions). When timestamp passed as null, all the versions
  * which the Scan selects will get deleted.
  * 
- * <br> Example: <pre><code>
+ * </br> Example: <code><pre>
  * Scan scan = new Scan();
  * // set scan properties(rowkey range, filters, timerange etc).
  * HTable ht = ...;
@@ -93,7 +93,7 @@ import com.google.protobuf.Service;
  * for (BulkDeleteResponse response : result.values()) {
  *   noOfDeletedRows += response.getRowsDeleted();
  * }
- * </code></pre>
+ * </pre></code>
  */
 public class BulkDeleteEndpoint extends BulkDeleteService implements CoprocessorService,
     Coprocessor {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
index bfed995..eb0e41f 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
@@ -310,7 +310,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
   /****************** complete seek when token mismatch ******************/
 
   /**
-   * @param searcherIsAfterInputKey &lt;0: input key is before the searcher's position<br>
+   * @param searcherIsAfterInputKey &lt;0: input key is before the searcher's position<br/>
    *          &gt;0: input key is after the searcher's position
    */
   protected CellScannerPosition fixRowTokenMissReverse(int searcherIsAfterInputKey) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
index 25bee1f..7da78a7 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
@@ -33,7 +33,6 @@ import com.google.common.collect.Lists;
 
 /**
  * Individual node in a Trie structure.  Each node is one of 3 types:
- * <ul>
  * <li>Branch: an internal trie node that may have a token and must have multiple children, but does
  * not represent an actual input byte[], hence its numOccurrences is 0
  * <li>Leaf: a node with no children and where numOccurrences is &gt;= 1.  It's token represents the
@@ -41,7 +40,6 @@ import com.google.common.collect.Lists;
  * <li>Nub: a combination of a branch and leaf.  Its token represents the last bytes of input
  * byte[]s and has numOccurrences &gt;= 1, but it also has child nodes which represent input byte[]s
  * that add bytes to this nodes input byte[].
- * </ul>
  * <br><br>
  * Example inputs (numInputs=7):
  * 0: AAA
@@ -550,8 +548,7 @@ public class TokenizerNode{
   /********************** simple mutation methods *************************/
 
   /**
-   * Each occurrence &gt; 1 indicates a repeat of the previous entry.
-   * This can be called directly by
+   * Each occurrence > 1 indicates a repeat of the previous entry.  This can be called directly by
    * an external class without going through the process of detecting a repeat if it is a known
    * repeat by some external mechanism.  PtEncoder uses this when adding cells to a row if it knows
    * the new cells are part of the current row.
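
[A small worked example of the three node types, using hypothetical inputs
{"car", "cart", "cat"} and the classification rules from the Javadoc above:

    // root token "ca" -> Branch (numOccurrences = 0; children "r" and "t")
    //   child "r"     -> Nub    (completes "car",  numOccurrences = 1; child "t")
    //     child "t"   -> Leaf   (completes "cart", numOccurrences = 1)
    //   child "t"     -> Leaf   (completes "cat",  numOccurrences = 1)
]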

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
index 4668468..a3ae097 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
@@ -50,8 +50,8 @@ public interface CellSearcher extends ReversibleCellScanner {
    * exact match.
    * </p>
    * @param key position the CellScanner on this key or the closest cell before
-   * @return AT if exact match<br>
-   *         BEFORE if on last cell before key<br>
+   * @return AT if exact match<br/>
+   *         BEFORE if on last cell before key<br/>
    *         BEFORE_FIRST if key was before the first cell in this scanner's scope
    */
   CellScannerPosition positionAtOrBefore(Cell key);
@@ -62,8 +62,8 @@ public interface CellSearcher extends ReversibleCellScanner {
    * match.
    * </p>
    * @param key position the CellScanner on this key or the closest cell after
-   * @return AT if exact match<br>
-   *         AFTER if on first cell after key<br>
+   * @return AT if exact match<br/>
+   *         AFTER if on first cell after key<br/>
    *         AFTER_LAST if key was after the last cell in this scanner's scope
    */
   CellScannerPosition positionAtOrAfter(Cell key);
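
[A hedged sketch of the floor-style positioning contract above; the helper and variable names are illustrative:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition;
    import org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher;

    public class FloorSeek {
      // Illustrative only; a real searcher comes from a decoded prefix-tree block.
      static Cell floor(CellSearcher searcher, Cell key) {
        CellScannerPosition pos = searcher.positionAtOrBefore(key);
        if (pos == CellScannerPosition.BEFORE_FIRST) {
          return null; // key sorts before every cell in this scanner's scope
        }
        // AT: exact match; BEFORE: last cell before key.
        return searcher.current();
      }
    }
]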

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
index 6a31eef..a05c115 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
@@ -64,6 +64,7 @@ public interface ProcedureStore {
     /**
      * Returns the next procedure in the iteration.
      * @throws IOException if there was an error fetching/deserializing the procedure
+     * @throws NoSuchElementException if the iteration has no more elements
      * @return the next procedure in the iteration.
      */
     Procedure next() throws IOException;
@@ -167,4 +168,4 @@ public interface ProcedureStore {
    * @param procId the ID of the procedure to remove.
    */
   void delete(long procId);
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
index 9d75612..4ffd590 100644
--- a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
+++ b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
@@ -62,7 +62,6 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
    * Extracts the byte array from the given {@link ByteString} without copy.
    * @param buf A buffer from which to extract the array.  This buffer must be
    * actually an instance of a {@code LiteralByteString}.
-   * @return byte[] representation
    */
   public static byte[] zeroCopyGetBytes(final ByteString buf) {
     if (buf instanceof LiteralByteString) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java
index bf94757..17bee5e 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java
@@ -74,8 +74,6 @@ public class ProtobufMagic {
 
   /**
    * @param bytes Bytes to check.
-   * @param offset offset to start at
-   * @param len length to use
    * @return True if passed <code>bytes</code> has {@link #PB_MAGIC} for a prefix.
    */
   public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
index e8845eb..2809ca9 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
@@ -100,7 +100,7 @@ public class RemoteAdmin {
 
   /**
    * @return string representing the rest api's version
-   * @throws IOException
+   * @throws IOEXception
    *           if the endpoint does not exist, there is a timeout, or some other
    *           general failure mode
    */
@@ -144,7 +144,7 @@ public class RemoteAdmin {
 
   /**
    * @return string representing the cluster's version
-   * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode
+   * @throws IOEXception if the endpoint does not exist, there is a timeout, or some other general failure mode
    */
   public StorageClusterStatusModel getClusterStatus() throws IOException {
 
@@ -185,7 +185,7 @@ public class RemoteAdmin {
 
   /**
    * @return string representing the cluster's version
-   * @throws IOException
+   * @throws IOEXception
    *           if the endpoint does not exist, there is a timeout, or some other
    *           general failure mode
    */
@@ -357,7 +357,7 @@ public class RemoteAdmin {
 
   /**
    * @return string representing the cluster's version
-   * @throws IOException
+   * @throws IOEXception
    *           if the endpoint does not exist, there is a timeout, or some other
    *           general failure mode
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
index 55f5769..25a6de3 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -87,7 +87,7 @@ import com.sun.jersey.api.json.JSONUnmarshaller;
  * 
  * <pre>
  * &lt;complexType name="Scanner"&gt;
- *   &lt;sequence&gt;
+ *   &lt;sequence>
  *     &lt;element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/&gt;
  *     &lt;element name="filter" type="string" minOccurs="0" maxOccurs="1"&gt;&lt;/element&gt;
  *   &lt;/sequence&gt;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index fd19ede..93eb5f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -1096,7 +1096,7 @@ public interface RegionObserver extends Coprocessor {
    * <li>
    * <code>boolean filterRow()</code> returning true</li>
    * <li>
-   * <code>void filterRow(List&lt;KeyValue&gt; kvs)</code> removing all the kvs from
+   * <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from
    * the passed List</li>
    * </ol>
    * @param c the environment provided by the region server

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 29a2c51..4ce2d94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -96,7 +96,7 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
  * There are three contexts:
  *   "/logs/" -&gt; points to the log directory
  *   "/static/" -&gt; points to common static files (src/webapps/static)
- *   "/" -&gt; the jsp server code from (src/webapps/&lt;name&gt;)
+ *   "/" -&gt; the jsp server code from (src/webapps/<name>)
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -447,7 +447,7 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/&lt;name&gt;.
+   * The jsp scripts are taken from src/webapps/<name>.
    * @param name The name of the server
    * @param bindAddress The address for this server
    * @param port The port to use on the server
@@ -466,7 +466,7 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/&lt;name&gt;.
+   * The jsp scripts are taken from src/webapps/<name>.
    * @param name The name of the server
    * @param bindAddress The address for this server
    * @param port The port to use on the server

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java
index 5ff6370..1e76da9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.conf.Configuration;
  * Create a Jetty embedded server to answer http requests. The primary goal
  * is to serve up status information for the server.
  * There are three contexts:
- *   "/stacks/" -&gt; points to stack trace
- *   "/static/" -&gt; points to common static files (src/hbase-webapps/static)
- *   "/" -&gt; the jsp server code from (src/hbase-webapps/&lt;name&gt;)
+ *   "/stacks/" -> points to stack trace
+ *   "/static/" -> points to common static files (src/hbase-webapps/static)
+ *   "/" -> the jsp server code from (src/hbase-webapps/<name>)
  */
 @InterfaceAudience.Private
 public class InfoServer {
@@ -44,7 +44,7 @@ public class InfoServer {
 
   /**
    * Create a status server on the given port.
-   * The jsp scripts are taken from src/hbase-webapps/<code>name</code>.
+   * The jsp scripts are taken from src/hbase-webapps/<code>name<code>.
    * @param name The name of the server
    * @param bindAddress address to bind to
    * @param port The port to use on the server

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index dbe8b24..498e213 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -51,34 +51,28 @@ import org.apache.hadoop.hbase.util.JSONBean;
  * functionality is provided through the
  * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
  * method.
- * </p>
  * <p>
  * For example <code>http://.../jmx?qry=Hadoop:*</code> will return
  * all hadoop metrics exposed through JMX.
- * </p>
  * <p>
  * The optional <code>get</code> parameter is used to query an specific 
  * attribute of a JMX bean.  The format of the URL is
- * <code>http://.../jmx?get=MXBeanName::AttributeName</code>
- * </p>
+ * <code>http://.../jmx?get=MXBeanName::AttributeName<code>
  * <p>
  * For example 
  * <code>
  * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
  * </code> will return the cluster id of the namenode mxbean.
- * </p>
  * <p>
  * If the <code>qry</code> or the <code>get</code> parameter is not formatted 
  * correctly then a 400 BAD REQUEST http response code will be returned. 
- * </p>
  * <p>
  * If a resouce such as a mbean or attribute can not be found, 
  * a 404 SC_NOT_FOUND http response code will be returned. 
- * </p>
  * <p>
  * The return format is JSON and in the form
- * </p>
- *  <pre><code>
+ * <p>
+ *  <code><pre>
  *  {
  *    "beans" : [
  *      {
@@ -87,7 +81,7 @@ import org.apache.hadoop.hbase.util.JSONBean;
  *      }
  *    ]
  *  }
- *  </code></pre>
+ *  </pre></code>
  *  <p>
  *  The servlet attempts to convert the the JMXBeans into JSON. Each
  *  bean's attributes will be converted to a JSON object member.
@@ -107,7 +101,6 @@ import org.apache.hadoop.hbase.util.JSONBean;
  *  The bean's name and modelerType will be returned for all beans.
  *
  *  Optional paramater "callback" should be used to deliver JSONP response.
- * </p>
  *  
  */
 public class JMXJsonServlet extends HttpServlet {
@@ -218,4 +211,4 @@ public class JMXJsonServlet extends HttpServlet {
       response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
     }
   }
-}
+}
\ No newline at end of file
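
[A minimal client-side sketch of the query interface described above; the host and port are assumptions (16010 is the usual master info port):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;

    public class JmxQuery {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:16010/jmx?qry=Hadoop:*");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(url.openStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            // JSON of the form {"beans":[{"name":...,"modelerType":...},...]}
            System.out.println(line);
          }
        }
      }
    }
]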

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java
index 324cc2d..9012ab6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java
@@ -19,7 +19,7 @@
  * This package provides access to JMX primarily through the
  * {@link org.apache.hadoop.hbase.http.jmx.JMXJsonServlet} class.
  * <p>
- * Copied from hadoop source code.<br>
+ * Copied from hadoop source code.<br/>
  * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why.
  * </p>
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java
index 1734f40..469c075 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java
@@ -26,7 +26,7 @@
  * users a static configured user.
  * </ul>
  * <p>
- * Copied from hadoop source code.<br>
+ * Copied from hadoop source code.<br/>
  * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why
  * </p>
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 067d639..50a5baa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -52,14 +52,13 @@ import org.apache.hadoop.util.StringUtils;
 
 /**
  * Provides functionality to write ({@link BlockIndexWriter}) and read
- * ({@link BlockIndexReader})
+ * ({@link org.apache.hadoop.hbase.io.hfile.BlockIndexReader})
  * single-level and multi-level block indexes.
  *
  * Examples of how to use the block index writer can be found in
  * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and
  *  {@link HFileWriterImpl}. Examples of how to use the reader can be
- *  found in {@link HFileWriterImpl} and
- *  {@link org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex}.
+ *  found in {@link HFileWriterImpl} and TestHFileBlockIndex.
  */
 @InterfaceAudience.Private
 public class HFileBlockIndex {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index faa8724..4e3929d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -407,8 +407,7 @@ public final class BucketAllocator {
   /**
    * Allocate a block with specified size. Return the offset
    * @param blockSize size of block
-   * @throws BucketAllocatorException
-   * @throws CacheFullException
+   * @throws BucketAllocatorException,CacheFullException
    * @return the offset in the IOEngine
    */
   public synchronized long allocateBlock(int blockSize) throws CacheFullException,

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index 2026feb..f8ccea3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -168,7 +168,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
 
   /**
    * Calculates the splits that will serve as input for the map tasks.
-   * 
+   * <ul>
    * Splits are created in number equal to the smallest between numSplits and
    * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. 
    * If the number of splits is smaller than the number of 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
index 5c46f2a..e9ce5a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
@@ -118,11 +118,11 @@ public class MultiTableSnapshotInputFormatImpl {
   }
 
   /**
-   * Retrieve the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration by
+   * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
    * {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)}
    *
-   * @param conf Configuration to extract name -&gt; list&lt;scan&gt; mappings from.
-   * @return the snapshot name -&gt; list&lt;scan&gt; mapping pushed to configuration
+   * @param conf Configuration to extract name -> list<scan> mappings from.
+   * @return the snapshot name -> list<scan> mapping pushed to configuration
    * @throws IOException
    */
   public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
index 8bc436f..c41439d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
@@ -25,17 +25,14 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * <p>
  * If other effects are needed, implement your own LogCleanerDelegate and add it to the
  * configuration "hbase.master.hfilecleaner.plugins", which is a comma-separated list of fully
- * qualified class names. The <code>HFileCleaner</code> will build the cleaner chain in 
+ * qualified class names. The <code>HFileCleaner<code> will build the cleaner chain in 
  * order the order specified by the configuration.
- * </p>
  * <p>
  * For subclasses, setConf will be called exactly <i>once</i> before using the cleaner.
- * </p>
  * <p>
  * Since {@link BaseHFileCleanerDelegate HFileCleanerDelegates} are created in
  * HFileCleaner by reflection, classes that implements this interface <b>must</b>
  * provide a default constructor.
- * </p>
  */
 @InterfaceAudience.Private
 public abstract class BaseHFileCleanerDelegate extends BaseFileCleanerDelegate {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index e93ad57..af3d302 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -232,6 +232,7 @@ public abstract class TableEventHandler extends EventHandler {
    * Gets a TableDescriptor from the masterServices.  Can Throw exceptions.
    *
    * @return Table descriptor for this table
+   * @throws TableExistsException
    * @throws FileNotFoundException
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index b6b9fe5..b21f4e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
  * <li>SnapshotDescription is readable</li>
  * <li>Table info is readable</li>
  * <li>Regions</li>
- * </ol>
  * <ul>
  * <li>Matching regions in the snapshot as currently in the table</li>
  * <li>{@link HRegionInfo} matches the current and stored regions</li>
@@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
  * <li>All the hfiles are present (either in .archive directory in the region)</li>
  * <li>All recovered.edits files are present (by name) and have the correct file size</li>
  * </ul>
+ * </ol>
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a3e862d..89807b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3776,6 +3776,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * the maxSeqId for the store to be applied, else its skipped.
    * @return the sequence id of the last edit added to this region out of the
    * recovered edits log or <code>minSeqId</code> if nothing added from editlogs.
+   * @throws UnsupportedEncodingException
    * @throws IOException
    */
   protected long replayRecoveredEditsIfAny(final Path regiondir,

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index 630ca7d..048d128 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -27,20 +27,18 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 
 /**
- * <p>
  * Compaction configuration for a particular instance of HStore.
  * Takes into account both global settings and ones set on the column family/store.
  * Control knobs for default compaction algorithm:
- * </p>
- * <p>
+ * <p/>
  * maxCompactSize - upper bound on file size to be included in minor compactions
  * minCompactSize - lower bound below which compaction is selected without ratio test
  * minFilesToCompact - lower bound on number of files in any minor compaction
  * maxFilesToCompact - upper bound on number of files in any minor compaction
  * compactionRatio - Ratio used for compaction
  * minLocalityToForceCompact - Locality threshold for a store file to major compact (HBASE-11195)
- * </p>
- * Set parameter as "hbase.hstore.compaction.&lt;attribute&gt;"
+ * <p/>
+ * Set parameter as "hbase.hstore.compaction.<attribute>"
  */
 
 @InterfaceAudience.Private
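
[A hedged sketch of setting these knobs through the "hbase.hstore.compaction.<attribute>" pattern named above; the exact attribute spellings are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // compactionRatio
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setLong("hbase.hstore.compaction.max.size",      // maxCompactSize
            10L * 1024 * 1024 * 1024);
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }
]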

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 26fef53..15ead14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -226,7 +226,7 @@ public abstract class Compactor {
    * @param scanner Where to read from.
    * @param writer Where to write to.
    * @param smallestReadPoint Smallest read point.
-   * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is &lt;= smallestReadPoint
+   * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
    * @return Whether compaction ended; false if it was interrupted for some reason.
    */
   protected boolean performCompaction(InternalScanner scanner, CellSink writer,

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
index 6337e28..92ab4d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
@@ -32,31 +32,26 @@ import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
- * <p>
  * This coprocessor 'shallows' all the writes. It allows to test a pure
  * write workload, going through all the communication layers.
  * The reads will work as well, but they as we never write, they will always always
  * return an empty structure. The WAL is also skipped.
  * Obviously, the region will never be split automatically. It's up to the user
  * to split and move it.
- * </p>
- * <p>
+ * <p/>
  * For a table created like this:
- * create 'usertable', {NAME =&gt; 'f1', VERSIONS =&gt; 1}
- * </p>
- * <p>
+ * create 'usertable', {NAME => 'f1', VERSIONS => 1}
+ * <p/>
  * You can then add the coprocessor with this command:
- * alter 'usertable', 'coprocessor' =&gt; '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
- * </p>
- * <p>
+ * alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
+ * <p/>
  * And then
  * put 'usertable', 'f1', 'f1', 'f1'
- * </p>
- * <p>
+ * <p/>
  * scan 'usertable'
  * Will return:
  * 0 row(s) in 0.0050 seconds
- * </p>
+ * <p/>
  */
 public class WriteSinkCoprocessor extends BaseRegionObserver {
   private static final Log LOG = LogFactory.getLog(WriteSinkCoprocessor.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
index 9fff872..5b6cb36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
@@ -88,7 +88,7 @@ public class BloomFilterChunk implements BloomFilterBase {
   }
 
   /**
-   * Determines &amp; initializes bloom filter meta data from user config. Call
+   * Determines & initializes bloom filter meta data from user config. Call
    * {@link #allocBloom()} to allocate bloom filter data.
    *
    * @param maxKeys Maximum expected number of keys that will be stored in this

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index 29ab24e..23dc570 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 /**
  * This class marches through all of the region's hfiles and verifies that
  * they are all valid files. One just needs to instantiate the class, use
- * checkTables(List&lt;Path&gt;) and then retrieve the corrupted hfiles (and
+ * checkTables(List<Path>) and then retrieve the corrupted hfiles (and
  * quarantined files if in quarantining mode)
  *
  * The implementation currently parallelizes at the regionDir level.

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
index d851d57..f889672 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
@@ -304,8 +304,8 @@ public class DefaultWALProvider implements WALProvider {
    * This function returns region server name from a log file name which is in one of the following
    * formats:
    * <ul>
-   *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;-splitting/...</li>
-   *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;/...</li>
+   *   <li>hdfs://<name node>/hbase/.logs/<server name>-splitting/...
+   *   <li>hdfs://<name node>/hbase/.logs/<server name>/...
    * </ul>
    * @param logFile
    * @return null if the passed in logFile isn't a valid WAL file path

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 9a26a24..fc43765 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -2228,13 +2228,13 @@ public class WALSplitter {
   }
 
   /**
-   * This function is used to construct mutations from a WALEntry. It also
-   * reconstructs WALKey &amp; WALEdit from the passed in WALEntry
+   * This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &
+   * WALEdit from the passed in WALEntry
    * @param entry
    * @param cells
    * @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
    *          extracted from the passed in WALEntry.
-   * @return list of Pair&lt;MutationType, Mutation&gt; to be replayed
+   * @return list of Pair<MutationType, Mutation> to be replayed
    * @throws IOException
    */
   public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells,

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e6373e8/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
index a6371cc..beb3fe9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
@@ -417,7 +417,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock {
 
   /**
    * Visits the locks (both held and attempted) with the given MetadataHandler.
-   * @throws IOException If there is an unrecoverable error
+   * @throws InterruptedException If there is an unrecoverable error
    */
   public void visitLocks(MetadataHandler handler) throws IOException {
     List<String> children;