Posted to commits@hbase.apache.org by bu...@apache.org on 2015/06/14 02:45:19 UTC

[1/3] hbase git commit: HBASE-13569 Correct Javadoc (for Java8)

Repository: hbase
Updated Branches:
  refs/heads/master 293506c7c -> 682b8ab8a


http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index a64bb94..f60272f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -67,16 +67,16 @@ import com.google.common.base.Preconditions;
  * <li>Uncompressed block size, excluding header, excluding checksum (4 bytes)
  * <li>The offset of the previous block of the same type (8 bytes). This is
  * used to be able to navigate to the previous block without going to the block
- * <li>For minorVersions >=1, the ordinal describing checksum type (1 byte)
- * <li>For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes)
- * <li>For minorVersions >=1, the size of data on disk, including header,
+ * <li>For minorVersions &gt;=1, the ordinal describing checksum type (1 byte)
+ * <li>For minorVersions &gt;=1, the number of data bytes/checksum chunk (4 bytes)
+ * <li>For minorVersions &gt;=1, the size of data on disk, including header,
  * excluding checksums (4 bytes)
  * </ul>
  * </li>
  * <li>Raw/Compressed/Encrypted/Encoded data. The compression algorithm is the
  * same for all the blocks in the {@link HFile}, similarly to what was done in
  * version 1.
- * <li>For minorVersions >=1, a series of 4 byte checksums, one each for
+ * <li>For minorVersions &gt;=1, a series of 4 byte checksums, one each for
  * the number of bytes specified by bytesPerChecksum.
  * </ul>
  * </ul>
@@ -1239,8 +1239,8 @@ public class HFileBlock implements Cacheable {
 
     /**
      * Creates a block iterator over the given portion of the {@link HFile}.
-     * The iterator returns blocks starting with offset such that offset <=
-     * startOffset < endOffset. Returned blocks are always unpacked.
+     * The iterator returns blocks starting with offset such that offset &lt;=
+     * startOffset &lt; endOffset. Returned blocks are always unpacked.
      *
      * @param startOffset the offset of the block to start iteration with
      * @param endOffset the offset to end iteration at (exclusive)
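
As a rough sketch of the field layout described in the block-format comment above (illustrative names only; the fields preceding this span, such as the block magic and on-disk size, are omitted):

    // Sketch: reading the documented fields from a ByteBuffer positioned at
    // the uncompressed-size field; this is not HFileBlock's actual parser.
    int uncompressedSizeWithoutHeader = buf.getInt(); // 4 bytes
    long prevBlockOffset = buf.getLong();             // 8 bytes
    // The next three fields are present only for minorVersions >= 1:
    byte checksumType = buf.get();                    // checksum type ordinal, 1 byte
    int bytesPerChecksum = buf.getInt();              // data bytes per checksum chunk, 4 bytes
    int onDiskDataSize = buf.getInt();                // incl. header, excl. checksums, 4 bytes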

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 642b6c7..c6655c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1002,7 +1002,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 
     /**
      * @param v
-     * @return True if v < 0 or v > current block buffer limit.
+     * @return True if v &lt; 0 or v &gt; current block buffer limit.
      */
     protected final boolean checkLen(final int v) {
       return v < 0 || v > this.blockBuffer.limit();

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
index 6b527f6..4d9990e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
@@ -44,11 +44,11 @@ public interface HFileScanner {
    * Consider the cell stream of all the cells in the file,
    * <code>c[0] .. c[n]</code>, where there are n cells in the file.
    * @param cell
-   * @return -1, if cell < c[0], no position;
+   * @return -1, if cell &lt; c[0], no position;
    * 0, such that c[i] = cell and scanner is left in position i; and
-   * 1, such that c[i] < cell, and scanner is left in position i.
+   * 1, such that c[i] &lt; cell, and scanner is left in position i.
    * The scanner will position itself between c[i] and c[i+1] where
-   * c[i] < cell <= c[i+1].
+   * c[i] &lt; cell &lt;= c[i+1].
    * If there is no cell c[i+1] greater than or equal to the input cell, then the
    * scanner will position itself at the end of the file and next() will return
    * false when it is called.
@@ -66,14 +66,14 @@ public interface HFileScanner {
   * <code>c[0] .. c[n]</code>, where there are n cells in the file after
    * current position of HFileScanner.
    * The scanner will position itself between c[i] and c[i+1] where
-   * c[i] < cell <= c[i+1].
+   * c[i] &lt; cell &lt;= c[i+1].
    * If there is no cell c[i+1] greater than or equal to the input cell, then the
    * scanner will position itself at the end of the file and next() will return
    * false when it is called.
    * @param cell Cell to find (should be non-null)
-   * @return -1, if cell < c[0], no position;
+   * @return -1, if cell &lt; c[0], no position;
    * 0, such that c[i] = cell and scanner is left in position i; and
-   * 1, such that c[i] < cell, and scanner is left in position i.
+   * 1, such that c[i] &lt; cell, and scanner is left in position i.
    * @throws IOException
    */
   int reseekTo(Cell cell) throws IOException;
@@ -82,9 +82,9 @@ public interface HFileScanner {
    * Consider the cell stream of all the cells in the file,
    * <code>c[0] .. c[n]</code>, where there are n cells in the file.
    * @param cell Cell to find
-   * @return false if cell <= c[0] or true with scanner in position 'i' such
-   * that: c[i] < cell.  Furthermore: there may be a c[i+1], such that
-   * c[i] < cell <= c[i+1] but there may also NOT be a c[i+1], and next() will
+   * @return false if cell &lt;= c[0] or true with scanner in position 'i' such
+   * that: c[i] &lt; cell.  Furthermore: there may be a c[i+1], such that
+   * c[i] &lt; cell &lt;= c[i+1] but there may also NOT be a c[i+1], and next() will
    * return false (EOF).
    * @throws IOException
    */
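
A small usage sketch of the seekTo() contract documented above (scanner and cell are placeholders, not code from this patch):

    int pos = scanner.seekTo(cell);
    if (pos == -1) {
      // cell < c[0]: the scanner has no position
    } else if (pos == 0) {
      // exact match: the scanner is left on c[i] with c[i] = cell
    } else { // pos == 1
      // the scanner is left on c[i] with c[i] < cell <= c[i+1];
      // next() returns false at EOF if no such c[i+1] exists
    }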

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 18dcbb0..806ddc9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -480,7 +480,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
   }
 
   /**
-   * Evict the block, and it will be cached by the victim handler if exists &&
+   * Evict the block, and it will be cached by the victim handler if exists &amp;&amp;
    * block may be read again later
    * @param block
    * @param evictedByEvictionProcess true if the given block is evicted by
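
Conceptually, the eviction hand-off this describes looks like the sketch below ("mayBeReadAgain" is a stand-in for the javadoc's second condition, not a real field):

    map.remove(block.getCacheKey());               // drop from the LRU map
    if (victimHandler != null && mayBeReadAgain) { // both javadoc conditions
      victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer());
    }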

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
index 1624082..0b28d72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
 
 /**
  * A memory-bound queue that will grow until an element brings
- * total size >= maxSize.  From then on, only entries that are sorted larger
+ * total size &gt;= maxSize.  From then on, only entries that are sorted larger
  * than the smallest current entry will be inserted/replaced.
  *
  * <p>Use this when you want to find the largest elements (according to their
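
The grow-then-replace policy it describes reduces to something like this sketch (a hypothetical min-heap-backed queue, not the real implementation):

    if (heapSize < maxSize) {
      queue.add(block);                          // under budget: just grow
      heapSize += block.heapSize();
    } else if (block.compareTo(queue.peek()) > 0) {
      heapSize += block.heapSize() - queue.poll().heapSize();
      queue.add(block);                          // displace the smallest entry
    }                                            // else: sorts below everything kept, drop it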

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
index f298698..d4a279c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java
@@ -37,7 +37,7 @@
  * (roughly because GC is less). See Nick Dimiduk's
  * <a href="http://www.n10k.com/blog/blockcache-101/">BlockCache 101</a> for some numbers.
  *
- * <h1>Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}</h2>
+ * <h1>Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}</h1>
  * See the HBase Reference Guide <a href="http://hbase.apache.org/book.html#enable.bucketcache">Enable BucketCache</a>.
  *
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
index 0da16a7..bb63e01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
@@ -27,7 +27,7 @@ public interface RpcCallContext extends Delayable {
   /**
    * Check if the caller who made this IPC call has disconnected.
    * If called from outside the context of IPC, this does nothing.
-   * @return < 0 if the caller is still connected. The time in ms
+   * @return &lt; 0 if the caller is still connected. The time in ms
    *  since the disconnection otherwise
    */
   long disconnectSince();

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
index 86fc5df..48a982b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Scan;
  * </p>
  *
  * <pre>
- * List<Scan> scans = new ArrayList<Scan>();
+ * List&lt;Scan&gt; scans = new ArrayList&lt;Scan&gt;();
  * 
  * Scan scan1 = new Scan();
  * scan1.setStartRow(firstRow1);
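
The javadoc snippet is cut off by the hunk; a plausible completion (table name, mapper and job are placeholders) tags each Scan with its table and hands the list to TableMapReduceUtil:

    scan1.setStopRow(lastRow1);
    scan1.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes("table1"));
    scans.add(scan1);

    TableMapReduceUtil.initTableMapperJob(scans, MyTableMapper.class,
        ImmutableBytesWritable.class, Result.class, job);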

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 769c40b..cb9759d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -513,7 +513,8 @@ public class TableMapReduceUtil {
    * and add it to the credentials for the given map reduce job.
    *
    * The quorumAddress is the key to the ZK ensemble, which contains:
-   * hbase.zookeeper.quorum, hbase.zookeeper.client.port and zookeeper.znode.parent
+   * hbase.zookeeper.quorum, hbase.zookeeper.client.port and
+   * zookeeper.znode.parent
    *
    * @param job The job that requires the permission.
   * @param quorumAddress string that contains the 3 required configurations
@@ -619,7 +620,8 @@ public class TableMapReduceUtil {
    * default; e.g. copying tables between clusters, the source would be
    * designated by <code>hbase-site.xml</code> and this param would have the
    * ensemble address of the remote cluster.  The format to pass is particular.
-   * Pass <code> &lt;hbase.zookeeper.quorum>:&lt;hbase.zookeeper.client.port>:&lt;zookeeper.znode.parent>
+   * Pass <code> &lt;hbase.zookeeper.quorum&gt;:&lt;
+   *             hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
    * </code> such as <code>server,server2,server3:2181:/hbase</code>.
    * @param serverClass redefined hbase.regionserver.class
    * @param serverImpl redefined hbase.regionserver.impl
@@ -650,7 +652,8 @@ public class TableMapReduceUtil {
    * default; e.g. copying tables between clusters, the source would be
    * designated by <code>hbase-site.xml</code> and this param would have the
    * ensemble address of the remote cluster.  The format to pass is particular.
-   * Pass <code> &lt;hbase.zookeeper.quorum>:&lt;hbase.zookeeper.client.port>:&lt;zookeeper.znode.parent>
+   * Pass <code> &lt;hbase.zookeeper.quorum&gt;:&lt;
+   *             hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
    * </code> such as <code>server,server2,server3:2181:/hbase</code>.
    * @param serverClass redefined hbase.regionserver.class
    * @param serverImpl redefined hbase.regionserver.impl
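
For instance, a job writing to a remote cluster would pass the ensemble key in exactly the quorum:port:znode-parent shape described above (values are illustrative):

    String peerQuorum = "server,server2,server3:2181:/hbase";
    TableMapReduceUtil.initTableReducerJob("peerTable", MyTableReducer.class,
        job, null, peerQuorum, null, null);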

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index f859780..5bd8d15 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -139,7 +139,8 @@ public class TableRecordReaderImpl {
   /**
    * Build the scanner. Not done in constructor to allow for extension.
    *
-   * @throws IOException, InterruptedException
+   * @throws IOException
+   * @throws InterruptedException
    */
   public void initialize(InputSplit inputsplit,
       TaskAttemptContext context) throws IOException,

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index bcb652b..86bcdae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2086,7 +2086,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   /**
    * Report whether this master has started initialization and is about to do meta region assignment
-   * @return true if master is in initialization & about to assign hbase:meta regions
+   * @return true if master is in initialization &amp; about to assign hbase:meta regions
    */
   public boolean isInitializationStartsMetaRegionAssignment() {
     return this.initializationBeforeMetaAssignment;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e82bd54..50070ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -765,7 +765,7 @@ public class MasterRpcServices extends RSRpcServices
   * @return Pair indicating the number of regions updated: Pair.getFirst is the
   *         regions that are yet to be updated; Pair.getSecond is the total number
   *         of regions of the table
-   * @throws IOException
+   * @throws ServiceException
    */
   @Override
   public GetSchemaAlterStatusResponse getSchemaAlterStatus(

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
index 62b7333..c3634e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
@@ -53,7 +53,7 @@ import com.google.common.base.Preconditions;
 public class RegionStateStore {
   private static final Log LOG = LogFactory.getLog(RegionStateStore.class);
 
-  /** The delimiter for meta columns for replicaIds > 0 */
+  /** The delimiter for meta columns for replicaIds &gt; 0 */
   protected static final char META_REPLICA_ID_DELIMITER = '_';
 
   private volatile Region metaRegion;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index 9673acf..fad84f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -132,7 +132,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
    *     Order the regions to move from most recent to least.
    *
    * <li>Iterate down the least loaded servers, assigning regions so each server
-   *     has exactly </b>MIN</b> regions.  Stop once you reach a server that
+   *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
    *     already has &gt;= <b>MIN</b> regions.
    *
    *     Regions being assigned to underloaded servers are those that were shed
@@ -159,7 +159,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
    *
    * <li>If we still have more regions that need assignment, again iterate the
    *     least loaded servers, this time giving each one (filling them to
-   *     </b>MAX</b>) until we run out.
+   *     <b>MAX</b>) until we run out.
    *
    * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
    *

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index e58f855..4955cfa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -49,8 +49,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
- * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
- * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
+ * <p>This is a best effort load balancer. Given a Cost function F(C) =&gt; x, it will
+ * randomly try and mutate the cluster to Cprime. If F(Cprime) &lt; F(C) then the
 * new cluster state becomes the plan. It includes cost functions to compute the cost of:</p>
  * <ul>
  * <li>Region Load</li>
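
The search loop the comment describes is, in sketch form (types and helpers are illustrative, not the balancer's actual code):

    double cost = computeCost(cluster);            // F(C)
    for (long step = 0; step < maxSteps; step++) {
      Cluster candidate = mutate(cluster);         // random move -> Cprime
      double candidateCost = computeCost(candidate);
      if (candidateCost < cost) {                  // F(Cprime) < F(C)
        cluster = candidate;                       // cheaper state becomes the plan
        cost = candidateCost;
      }
    }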

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
index 662e4bd..d352561 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
@@ -31,15 +31,17 @@ import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
  * <p>
  * Currently there are two different types of Store/Family-level queries.
  * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies
- * one or more column qualifiers to return in the family.
- * <ul><li>{@link ScanWildcardColumnTracker} is used when no columns are
- * explicitly specified.
+ * one or more column qualifiers to return in the family.</li>
+ * <li>{@link ScanWildcardColumnTracker} is used when no columns are
+ * explicitly specified.</li>
+ * </ul>
  * <p>
  * This class is utilized by {@link ScanQueryMatcher} mainly through two methods:
  * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.
- * <ul><li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
- * believes that the current column should be skipped (by timestamp, filter etc.)
+ * conditions of the query.</li>
+ * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
+ * believes that the current column should be skipped (by timestamp, filter etc.)</li>
+ * </ul>
  * <p>
   * These two methods return a
  * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode}

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
index d40b21d..930baf0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
@@ -68,7 +68,7 @@ public interface CompactionRequestor {
   /**
    * @param r Region to compact
    * @param why Why compaction was requested -- used in debug messages
-   * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @param pri Priority of this compaction. minHeap. &lt;=0 is critical
    * @param requests custom compaction requests. Each compaction must specify the store on which it
    *          is acting. Can be <tt>null</tt> in which case a compaction will be attempted on all
    *          stores for the region.
@@ -84,7 +84,7 @@ public interface CompactionRequestor {
    * @param r Region to compact
    * @param s Store within region to compact
    * @param why Why compaction was requested -- used in debug messages
-   * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @param pri Priority of this compaction. minHeap. &lt;=0 is critical
    * @param request custom compaction request to run. {@link Store} and {@link Region} for the
    *          request must match the region and store specified here.
    * @return The created {@link CompactionRequest} or <tt>null</tt> if no compaction was started

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 9839124..c24d6df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -222,7 +222,7 @@ public class DefaultMemStore implements MemStore {
   /**
    * Write an update
    * @param cell
-   * @return approximate size of the passed KV & newly added KV which maybe different than the
+   * @return approximate size of the passed KV &amp; newly added KV, which may be different from the
    *         passed-in KV
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
index 70254fe..8f466fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
@@ -26,9 +26,10 @@ import org.apache.hadoop.hbase.Cell;
  * during the course of a Get or Scan operation.
  * <p>
  * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
- * <li>{@link #update} when reaching the end of a StoreFile
+ * <ul><li>{@link #add} when encountering a Delete</li>
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted</li>
+ * <li>{@link #update} when reaching the end of a StoreFile</li>
+ * </ul>
  */
 @InterfaceAudience.Private
 public interface DeleteTracker {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
index cbf7719..9e80d8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
@@ -41,9 +41,10 @@ import org.apache.hadoop.hbase.util.Bytes;
  * <p>
  * This class is utilized by {@link ScanQueryMatcher} mainly through two methods:
  * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.
- * <ul><li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
- * believes that the current column should be skipped (by timestamp, filter etc.)
+ * conditions of the query.</li>
+ * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
+ * believes that the current column should be skipped (by timestamp, filter etc.)</li>
+ * </ul>
  * <p>
   * These two methods return a
  * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode}

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
index 5448025..3deb258 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
@@ -206,7 +206,7 @@ public class HeapMemoryManager {
   }
 
   /**
-   * @return heap occupancy percentage, 0 <= n <= 1
+   * @return heap occupancy percentage, 0 &lt;= n &lt;= 1
    */
   public float getHeapOccupancyPercent() {
     return this.heapOccupancyPercent;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
index 476bcdb..1439388 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
@@ -93,8 +93,8 @@ extends ConstantSizeRegionSplitPolicy {
   }
 
   /**
-   * @return Region max size or <code>count of regions squared * flushsize, which ever is
-   * smaller; guard against there being zero regions on this server.
+   * @return Region max size or <code>count of regions squared * flushsize</code>,
+   * whichever is smaller; guard against there being zero regions on this server.
    */
   protected long getSizeToCheck(final int tableRegionsCount) {
     // safety check for 100 to avoid numerical overflow in extreme cases
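
Per the corrected javadoc, the bound works out to roughly the following (a sketch of the described rule, not the shipped method body):

    long sizeToCheck = tableRegionsCount == 0 || tableRegionsCount > 100
        ? getDesiredMaxFileSize()                  // zero-region guard / overflow cap
        : Math.min(getDesiredMaxFileSize(),
                   flushSize * tableRegionsCount * tableRegionsCount);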

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
index 18f5198..b68868e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
@@ -36,7 +36,7 @@ import java.util.Set;
  * The LruHashMap is a memory-aware HashMap with a configurable maximum
  * memory footprint.
  * <p>
- * It maintains an ordered list of all entries in the map ordered by
+ * It maintains an ordered list of all entries in the map ordered by
 * access time.  When space needs to be freed because the maximum has been
  * reached, or the application has asked to free memory, entries will be
  * evicted according to an LRU (least-recently-used) algorithm.  That is,
@@ -102,7 +102,7 @@ implements HeapSize, Map<K,V> {
    * @throws IllegalArgumentException if the initial capacity is less than one
    * @throws IllegalArgumentException if the initial capacity is greater than
    * the maximum capacity
-   * @throws IllegalArgumentException if the load factor is <= 0
+   * @throws IllegalArgumentException if the load factor is &lt;= 0
    * @throws IllegalArgumentException if the max memory usage is too small
    * to support the base overhead
    */
@@ -141,7 +141,7 @@ implements HeapSize, Map<K,V> {
    * @throws IllegalArgumentException if the initial capacity is less than one
    * @throws IllegalArgumentException if the initial capacity is greater than
    * the maximum capacity
-   * @throws IllegalArgumentException if the load factor is <= 0
+   * @throws IllegalArgumentException if the load factor is &lt;= 0
    */
   public LruHashMap(int initialCapacity, float loadFactor) {
     this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE);

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
index 87710df..0566dca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.util.StringUtils;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
- * A pool of {@link HeapMemStoreLAB$Chunk} instances.
+ * A pool of {@link HeapMemStoreLAB.Chunk} instances.
  * 
 * MemStoreChunkPool caches a number of retired chunks for reuse; it can
  * decrease allocating bytes when writing, thereby optimizing the garbage

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
index 9f98ba6..b2cb772 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
@@ -22,9 +22,10 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 
 /**
+ * <p>
  * This class is for maintaining the various regionserver statistics
  * and publishing them through the metrics interfaces.
- * <p/>
+ * </p>
  * This class has a number of metrics variables that are publicly accessible;
  * these variables (objects) have methods to update their values.
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index a2284dd..2b12dec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
  * ObserverContext, MiniBatchOperationInProgress)
  * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
  * ObserverContext, MiniBatchOperationInProgress)
- * @param <T> Pair<Mutation, Integer> pair of Mutations and associated rowlock ids .
+ * @param T Pair&lt;Mutation, Integer&gt; pair of Mutations and associated rowlock ids.
  */
 @InterfaceAudience.Private
 public class MiniBatchOperationInProgress<T> {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
index c0ab1a0..1eb05f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 
 /**
- * A "non-reversed & non-lazy" scanner which does not support backward scanning
+ * A "non-reversed &amp; non-lazy" scanner which does not support backward scanning
  * and always does a real seek operation. Most scanners are inherited from this
  * class.
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index aedd351..cedaa7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1340,14 +1340,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * The opening is coordinated by ZooKeeper, and this method requires the znode to be created
    *  before being called. As a consequence, this method should be called only from the master.
    * <p>
-   * Different manages states for the region are:<ul>
+   * Different managed states for the region are:
+   * </p><ul>
    *  <li>region not opened: the region opening will start asynchronously.</li>
    *  <li>a close is already in progress: this is considered as an error.</li>
    *  <li>an open is already in progress: this new open request will be ignored. This is important
    *  because the Master can do multiple requests if it crashes.</li>
-   *  <li>the region is already opened:  this new open request will be ignored./li>
+   *  <li>the region is already opened:  this new open request will be ignored.</li>
    *  </ul>
-   * </p>
    * <p>
    * Bulk assign: If there are more than 1 region to open, it will be considered as a bulk assign.
    * For a single region opening, errors are sent through a ServiceException. For bulk assign,
@@ -1780,7 +1780,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   /**
    * Atomically bulk load several HFiles into an open region
   * @return true if successful, false if failed but recoverably (no action)
-   * @throws IOException if failed unrecoverably
+   * @throws ServiceException if failed unrecoverably
    */
   @Override
   public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 5667458..5c500b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -514,7 +514,7 @@ public interface Region extends ConfigurationObserver {
    * Attempts to atomically load a group of hfiles.  This is critical for loading
    * rows with multiple column families atomically.
    *
-   * @param familyPaths List of Pair<byte[] column family, String hfilePath>
+   * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
    * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
    * file about to be bulk loaded
    * @param assignSeqId
@@ -652,7 +652,6 @@ public interface Region extends ConfigurationObserver {
    * the region needs compacting
    *
    * @throws IOException general io exceptions
-   * @throws DroppedSnapshotException Thrown when abort is required
    * because a snapshot was not properly persisted.
    */
   FlushResult flush(boolean force) throws IOException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
index 66e087b..1bc6546 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
@@ -91,7 +91,7 @@ public interface RegionScanner extends InternalScanner {
    * Upon returning from this method, the {@link ScannerContext} will contain information about the
    * progress made towards the limits. This is a special internal method to be called from
    * coprocessor hooks to avoid expensive setup. Caller must set the thread's readpoint, start and
-   * close a region operation, an synchronize on the scanner object. Example: <code><pre>
+   * close a region operation, and synchronize on the scanner object. Example: <code>
    * HRegion region = ...;
    * RegionScanner scanner = ...
    * MultiVersionConsistencyControl.setThreadReadPoint(scanner.getMvccReadPoint());
@@ -105,7 +105,7 @@ public interface RegionScanner extends InternalScanner {
    * } finally {
    *   region.closeRegionOperation();
    * }
-   * </pre></code>
+   * </code>
    * @param result return output array
    * @param scannerContext The {@link ScannerContext} instance encapsulating all limits that should
    *          be tracked during calls to this method. The progress towards these limits can be

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
index a5c17fb..adee911 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
@@ -36,9 +36,10 @@ import org.apache.hadoop.hbase.util.Bytes;
  *
  * <p>
  * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete or DeleteColumn
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
- * <li>{@link #update} when reaching the end of a StoreFile or row for scans
+ * <ul><li>{@link #add} when encountering a Delete or DeleteColumn</li>
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted</li>
+ * <li>{@link #update} when reaching the end of a StoreFile or row for scans</li>
+ * </ul>
  * <p>
  * This class is NOT thread-safe as queries are never multi-threaded
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 3b169ad..46fce67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -126,7 +126,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   /**
    * Adds a value to the memstore
    * @param cell
-   * @return memstore size delta & newly added KV which maybe different than the passed in KV
+   * @return memstore size delta &amp; newly added KV, which may be different from the passed-in KV
    */
   Pair<Long, Cell> add(Cell cell);
 
@@ -136,8 +136,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   long timeOfOldestEdit();
 
   /**
-   * Removes a Cell from the memstore. The Cell is removed only if its key & memstoreTS match the
-   * key & memstoreTS value of the cell parameter.
+   * Removes a Cell from the memstore. The Cell is removed only if its key
+   * &amp; memstoreTS match the key &amp; memstoreTS value of the cell
+   * parameter.
    * @param cell
    */
   void rollback(final Cell cell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 4be5c7b..d63ccca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
 * Scanner scans both the memstore and the Store. Coalesces the KeyValue stream
- * into List<KeyValue> for a single row.
+ * into List&lt;KeyValue&gt; for a single row.
  */
 @InterfaceAudience.Private
 public class StoreScanner extends NonReversedNonLazyKeyValueScanner

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
index 55c057b..cb89346 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
@@ -44,7 +44,6 @@ public class ReplayHLogKey extends HLogKey {
   /**
    * Returns the original sequence id
    * @return long the new assigned sequence number
-   * @throws InterruptedException
    */
   @Override
   public long getSequenceId() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index 11c4ee1..f6619e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -57,9 +57,9 @@ import com.google.common.annotations.VisibleForTesting;
  * Previously, if a transaction contains 3 edits to c1, c2, c3 for a row R,
  * the WAL would have three log entries as follows:
  *
- *    <logseq1-for-edit1>:<KeyValue-for-edit-c1>
- *    <logseq2-for-edit2>:<KeyValue-for-edit-c2>
- *    <logseq3-for-edit3>:<KeyValue-for-edit-c3>
+ *    &lt;logseq1-for-edit1&gt;:&lt;KeyValue-for-edit-c1&gt;
+ *    &lt;logseq2-for-edit2&gt;:&lt;KeyValue-for-edit-c2&gt;
+ *    &lt;logseq3-for-edit3&gt;:&lt;KeyValue-for-edit-c3&gt;
  *
  * This presents problems because row level atomicity of transactions
 * was not guaranteed. If we crash after a few of the above appends make
@@ -68,15 +68,15 @@ import com.google.common.annotations.VisibleForTesting;
  * In the new world, all the edits for a given transaction are written
  * out as a single record, for example:
  *
- *   <logseq#-for-entire-txn>:<WALEdit-for-entire-txn>
+ *   &lt;logseq#-for-entire-txn&gt;:&lt;WALEdit-for-entire-txn&gt;
  *
  * where, the WALEdit is serialized as:
- *   <-1, # of edits, <KeyValue>, <KeyValue>, ... >
+ *   &lt;-1, # of edits, &lt;KeyValue&gt;, &lt;KeyValue&gt;, ... &gt;
  * For example:
- *   <-1, 3, <Keyvalue-for-edit-c1>, <KeyValue-for-edit-c2>, <KeyValue-for-edit-c3>>
+ *   &lt;-1, 3, &lt;Keyvalue-for-edit-c1&gt;, &lt;KeyValue-for-edit-c2&gt;, &lt;KeyValue-for-edit-c3&gt;&gt;
  *
  * The -1 marker is just a special way of being backward compatible with
- * an old WAL which would have contained a single <KeyValue>.
+ * an old WAL which would have contained a single &lt;KeyValue&gt;.
  *
  * The deserializer for WALEdit backward compatibly detects if the record
  * is an old style KeyValue or the new style WALEdit.
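
The backward-compatible detection it mentions amounts to a branch like this (an illustrative reader, not WALEdit's actual deserializer):

    int versionOrLength = in.readInt();
    if (versionOrLength == -1) {        // new-style: <-1, count, KV, KV, ...>
      int numEdits = in.readInt();
      for (int i = 0; i < numEdits; i++) {
        kvs.add(readKeyValue(in));
      }
    } else {                            // old-style: a single KeyValue record
      kvs.add(readLegacyKeyValue(versionOrLength, in));
    }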

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
index 59a1b43..1314a4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
@@ -53,9 +53,9 @@ import com.google.protobuf.ServiceException;
 
 /**
  * This class is responsible for replaying the edits coming from a failed region server.
- * <p/>
+ * <p>
  * This class uses the native HBase client in order to replay WAL entries.
- * <p/>
+ * </p>
  */
 @InterfaceAudience.Private
 public class WALEditsReplaySink {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
index de82b7e..27f019a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
@@ -160,7 +160,6 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
    * Get a list of all the addresses of all the region servers
    * for this peer cluster
    * @return list of addresses
-   * @throws KeeperException
    */
   // Synchronize peer cluster connection attempts to avoid races and rate
   // limit connections when multiple replication sources try to connect to

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 884bce1..bf31a7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -46,9 +46,10 @@ import org.apache.hadoop.ipc.RemoteException;
  * For the slave cluster it selects a random number of peers
 * using a replication ratio. For example, if replication ratio = 0.1
  * and slave cluster has 100 region servers, 10 will be selected.
- * <p/>
+ * <p>
  * A stream is considered down when we cannot contact a region server on the
  * peer cluster for more than 55 seconds by default.
+ * </p>
  */
 @InterfaceAudience.Private
 public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoint {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 3276418..7d47677 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -53,16 +53,17 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 
 /**
+ * <p>
  * This class is responsible for replicating the edits coming
  * from another cluster.
- * <p/>
+ * </p><p>
  * This replication process is currently waiting for the edits to be applied
  * before the method can return. This means that the replication of edits
  * is synchronized (after reading from WALs in ReplicationSource) and that a
  * single region server cannot receive edits from two sources at the same time
- * <p/>
+ * </p><p>
  * This class uses the native HBase client in order to replicate entries.
- * <p/>
+ * </p>
  *
  * TODO make this class more like ReplicationSource wrt log handling
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f7230ab..3f23837 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -66,10 +66,10 @@ import com.google.common.util.concurrent.Service;
  * For each slave cluster it selects a random number of peers
 * using a replication ratio. For example, if replication ratio = 0.1
  * and slave cluster has 100 region servers, 10 will be selected.
- * <p/>
+ * <p>
  * A stream is considered down when we cannot contact a region server on the
  * peer cluster for more than 55 seconds by default.
- * <p/>
+ * </p>
  *
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 4d97257..0c8f6f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -62,9 +62,11 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 /**
 * This class is responsible for managing all the replication
  * sources. There are two classes of sources:
+ * <ul>
  * <li> Normal sources are persistent and one per peer cluster</li>
  * <li> Old sources are recovered from a failed region server and our
  * only goal is to finish replicating the WAL queue it had up in ZK</li>
+ * </ul>
  *
  * When a region server dies, this class uses a watcher to get notified and it
  * tries to grab a lock in order to transfer all the queues in a local

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
index 742fbff..c756576 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Per-peer per-node throttling controller for replication: enabled if
- * bandwidth > 0, a cycle = 100ms, by throttling we guarantee data pushed
+ * bandwidth &gt; 0, a cycle = 100ms, by throttling we guarantee data pushed
  * to peer within each cycle won't exceed 'bandwidth' bytes
  */
 @InterfaceAudience.Private

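A minimal sketch of the throttling contract documented above, assuming
'bandwidth' is a byte quota per 100ms cycle (the field and method names are
illustrative, not the real ReplicationThrottler API):

    // Sketch: a push is delayed when it would exceed the per-cycle quota.
    public class ThrottlerSketch {
      private final long bandwidth;     // bytes allowed per cycle; off if <= 0
      private long pushedThisCycle = 0; // bytes already pushed in this cycle

      ThrottlerSketch(long bandwidth) { this.bandwidth = bandwidth; }

      boolean shouldThrottle(long nextPushSize) {
        return bandwidth > 0 && pushedThisCycle + nextPushSize > bandwidth;
      }
      void addPushSize(long size) { pushedThisCycle += size; }
      void resetCycle() { pushedThisCycle = 0; } // called once per 100ms cycle
    }
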
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 19252bb..131ff14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -93,6 +93,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * user,family,qualifier    column qualifier level permissions for a user
  * group,family,qualifier   column qualifier level permissions for a group
  * </pre>
+ * <p>
  * All values are encoded as byte arrays containing the codes from the
  * org.apache.hadoop.hbase.security.access.TablePermission.Action enum.
  * </p>

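The value encoding described above can be sketched as follows; the single-letter
codes and the helper class are assumptions for illustration, the real codes come
from TablePermission.Action:

    // Hypothetical helper: permissions serialized as one code byte per action.
    public class AclValueSketch {
      enum Action {
        READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
        final byte code;
        Action(char c) { this.code = (byte) c; }
      }
      static byte[] encode(Action... actions) {
        byte[] value = new byte[actions.length];
        for (int i = 0; i < actions.length; i++) {
          value[i] = actions[i].code;
        }
        return value;
      }
      public static void main(String[] args) {
        System.out.println(new String(encode(Action.READ, Action.WRITE))); // RW
      }
    }
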
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 93ad41d..7e9299a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -127,6 +127,7 @@ import com.google.protobuf.Service;
  * <p>
  * {@code AccessController} performs authorization checks for HBase operations
  * based on:
+ * </p>
  * <ul>
  *   <li>the identity of the user performing the operation</li>
  *   <li>the scope over which the operation is performed, in increasing
@@ -134,6 +135,7 @@ import com.google.protobuf.Service;
  *   <li>the type of action being performed (as mapped to
  *   {@link Permission.Action} values)</li>
  * </ul>
+ * <p>
  * If the authorization check fails, an {@link AccessDeniedException}
  * will be thrown for the operation.
  * </p>

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
index 92f9d93..774930d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
@@ -120,7 +120,7 @@ public class VisibilityUtils {
 
   /**
    * Reads back from the zookeeper. The data read here is of the form written by
-   * writeToZooKeeper(Map<byte[], Integer> entries).
+   * writeToZooKeeper(Map&lt;byte[], Integer&gt; entries).
    * 
    * @param data
    * @return Labels and their ordinal details

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index cd04b82..2fc5d83 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
  *
  * <pre>
  * /hbase/.snapshots
- *          /.tmp                <---- working directory
- *          /[snapshot name]     <----- completed snapshot
+ *          /.tmp                &lt;---- working directory
+ *          /[snapshot name]     &lt;----- completed snapshot
  * </pre>
  *
  * A completed snapshot named 'completed' then looks like (multiple regions, servers, files, etc.
@@ -51,16 +51,16 @@ import org.apache.hadoop.hbase.util.FSUtils;
  *
  * <pre>
  * /hbase/.snapshots/completed
- *                   .snapshotinfo          <--- Description of the snapshot
- *                   .tableinfo             <--- Copy of the tableinfo
+ *                   .snapshotinfo          &lt;--- Description of the snapshot
+ *                   .tableinfo             &lt;--- Copy of the tableinfo
  *                    /.logs
  *                        /[server_name]
  *                            /... [log files]
  *                         ...
- *                   /[region name]           <---- All the region's information
- *                   .regioninfo              <---- Copy of the HRegionInfo
+ *                   /[region name]           &lt;---- All the region's information
+ *                   .regioninfo              &lt;---- Copy of the HRegionInfo
  *                      /[column family name]
- *                          /[hfile name]     <--- name of the hfile in the real region
+ *                          /[hfile name]     &lt;--- name of the hfile in the real region
  *                          ...
  *                      ...
  *                    ...

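The snapshot layout above composes naturally with org.apache.hadoop.fs.Path; a
minimal sketch using the directory names from the tree ('completed' is the
example snapshot name used above):

    import org.apache.hadoop.fs.Path;

    public class SnapshotLayoutSketch {
      public static void main(String[] args) {
        Path snapshotRoot = new Path("/hbase/.snapshots");
        Path workingDir = new Path(snapshotRoot, ".tmp");     // in-progress snapshots
        Path completed = new Path(snapshotRoot, "completed"); // a finished snapshot
        Path info = new Path(completed, ".snapshotinfo");     // snapshot description
        Path tableinfo = new Path(completed, ".tableinfo");   // copy of the tableinfo
        System.out.println(info); // /hbase/.snapshots/completed/.snapshotinfo
      }
    }
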
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
index 2aba737..6869d69 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.io.Writable;
 @InterfaceAudience.Private
 public interface BloomFilterWriter extends BloomFilterBase {
 
-  /** Compact the Bloom filter before writing metadata & data to disk. */
+  /** Compact the Bloom filter before writing metadata &amp; data to disk. */
   void compactBloom();
   /**
    * Get a writable interface into bloom filter meta data.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index a591cf0..6d10351 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -180,8 +180,9 @@ public abstract class FSUtils {
   }
 
   /**
-   * Compare of path component. Does not consider schema; i.e. if schemas different but <code>path
-   * <code> starts with <code>rootPath<code>, then the function returns true
+   * Compares the path component. Does not consider the schema; i.e. if the
+   * schemas are different but <code>path</code> starts with <code>rootPath</code>,
+   * then the function returns true
    * @param rootPath
    * @param path
    * @return True if <code>path</code> starts with <code>rootPath</code>
@@ -1435,7 +1436,7 @@ public abstract class FSUtils {
    * Given a particular table dir, return all the regiondirs inside it, excluding files such as
    * .tableinfo
    * @param fs A file system for the Path
-   * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
+   * @param tableDir Path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
    * @return List of paths to valid region directories in table dir.
    * @throws IOException
    */
@@ -1452,7 +1453,7 @@ public abstract class FSUtils {
 
   /**
    * Filter for all dirs that are legal column family names.  This is generally used for colfam
-   * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>.
+   * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
    */
   public static class FamilyDirFilter implements PathFilter {
     final FileSystem fs;

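The schema-insensitive comparison described in the first FSUtils hunk above can
be sketched like this (assumed logic, not the actual implementation): strip the
scheme from both paths, then do a plain prefix check.

    import java.net.URI;

    public class PathPrefixSketch {
      /** True if path starts with rootPath, ignoring any URI scheme. */
      static boolean startsWith(String rootPath, String path) {
        return URI.create(path).getPath().startsWith(URI.create(rootPath).getPath());
      }
      public static void main(String[] args) {
        // Schemes differ, but the path component matches:
        System.out.println(startsWith("hdfs:///hbase", "file:///hbase/data")); // true
      }
    }
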
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 3e164ba..cc87f64 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -611,7 +611,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * region servers and the masters.  It makes each region's state in HDFS, in
    * hbase:meta, and deployments consistent.
    *
-   * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable
+   * @return If &gt; 0 , number of errors detected, if &lt; 0 there was an unrecoverable
    * error.  If 0, we have a clean hbase.
    */
   public int onlineConsistencyRepair() throws IOException, KeeperException,

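A sketch of honoring that return-value contract at a call site; the helper below
is hypothetical:

    // Hypothetical helper: < 0 unrecoverable, 0 clean, > 0 error count.
    public class FsckResultSketch {
      static String describe(int rc) {
        if (rc < 0) return "unrecoverable error, aborting";
        if (rc > 0) return rc + " inconsistencies detected";
        return "hbase is clean";
      }
      public static void main(String[] args) {
        System.out.println(describe(0)); // hbase is clean
      }
    }
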
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
index faced06..7f74d55 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
@@ -60,10 +60,12 @@ import org.apache.hadoop.util.ToolRunner;
  * have such files.
  * <p>
  * To print the help section of the tool:
+ * </p>
  * <ul>
- * <li>./bin/hbase org.apache.hadoop.hbase.util.HFileV1Detector --h or,
- * <li>java -cp `hbase classpath` org.apache.hadoop.hbase.util.HFileV1Detector --h
+ * <li>./bin/hbase org.apache.hadoop.hbase.util.HFileV1Detector --h or,</li>
+ * <li>java -cp `hbase classpath` org.apache.hadoop.hbase.util.HFileV1Detector --h</li>
  * </ul>
+ * <p>
  * It also supports -h, --help, -help options.
  * </p>
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index a55c876..ed72ea2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -111,7 +111,6 @@ public class MultiHConnection {
    * @param results the results array
    * @param callback 
    * @throws IOException
-   * @throws InterruptedException
    */
   @SuppressWarnings("deprecation")
   public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName,

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 3a08750..ea704f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -85,7 +85,7 @@ import com.google.common.collect.Sets;
  * <b>Answer:</b> Automatic splitting is determined by the configuration value
  * <i>HConstants.HREGION_MAX_FILESIZE</i>. It is not recommended that you set this
  * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting
- * is 100GB, which would result in > 1hr major compactions if reached.
+ * is 100GB, which would result in &gt; 1hr major compactions if reached.
  * <p>
  * <b>Question:</b> Why did the original authors decide to manually split? <br>
  * <b>Answer:</b> Specific workload characteristics of our use case allowed us
@@ -227,7 +227,7 @@ public class RegionSplitter {
     /**
      * @param row
      *          byte array representing a row in HBase
-     * @return String to use for debug & file printing
+     * @return String to use for debug &amp; file printing
      */
     String rowToStr(byte[] row);
 
@@ -254,12 +254,12 @@ public class RegionSplitter {
    * <p>
    * <ul>
    * <li>create a table named 'myTable' with 60 pre-split regions containing 2
-   * column families 'test' & 'rs', assuming the keys are hex-encoded ASCII:
+   * column families 'test' &amp; 'rs', assuming the keys are hex-encoded ASCII:
    * <ul>
    * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs
    * myTable HexStringSplit
    * </ul>
-   * <li>perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2
+   * <li>perform a rolling split of 'myTable' (i.e. 60 =&gt; 120 regions), # 2
    * outstanding splits at a time, assuming keys are uniformly distributed
    * bytes:
    * <ul>
@@ -878,10 +878,10 @@ public class RegionSplitter {
    * boundaries. The format of a HexStringSplit region boundary is the ASCII
    * representation of an MD5 checksum, or any other uniformly distributed
   * hexadecimal value. Rows are hex-encoded long values in the range
-   * <b>"00000000" => "FFFFFFFF"</b> and are left-padded with zeros to keep the
+   * <b>"00000000" =&gt; "FFFFFFFF"</b> and are left-padded with zeros to keep the
    * same order lexicographically as if they were binary.
    *
-   * Since this split algorithm uses hex strings as keys, it is easy to read &
+   * Since this split algorithm uses hex strings as keys, it is easy to read &amp;
    * write in the shell but takes up more space and may be non-intuitive.
    */
   public static class HexStringSplit implements SplitAlgorithm {
@@ -1032,7 +1032,7 @@ public class RegionSplitter {
   /**
    * A SplitAlgorithm that divides the space of possible keys evenly. Useful
    * when the keys are approximately uniform random bytes (e.g. hashes). Rows
-   * are raw byte values in the range <b>00 => FF</b> and are right-padded with
+   * are raw byte values in the range <b>00 =&gt; FF</b> and are right-padded with
    * zeros to keep the same memcmp() order. This is the natural algorithm to use
    * for a byte[] environment and saves space, but is not necessarily the
    * easiest for readability.

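A worked sketch of the HexStringSplit boundaries described above: a split point
is the midpoint of the hex key space, left-padded to eight characters (the
arithmetic here is an assumption; the real algorithm handles rounding and
padding more carefully):

    import java.math.BigInteger;

    public class HexSplitSketch {
      static String midpoint(String lo, String hi) {
        BigInteger mid = new BigInteger(lo, 16)
            .add(new BigInteger(hi, 16)).shiftRight(1); // (lo + hi) / 2
        return String.format("%08X", mid);              // zero-padded hex
      }
      public static void main(String[] args) {
        System.out.println(midpoint("00000000", "FFFFFFFF")); // 7FFFFFFF
      }
    }
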
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
index 67f8e84..5c61afb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
@@ -44,7 +44,7 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil {
    * Whether asynchronous WAL replication to the secondary region replicas is enabled or not.
    * If this is enabled, a replication peer named "region_replica_replication" will be created
   * which will tail the logs and replicate the mutations to region replicas for tables that
-   * have region replication > 1. If this is enabled once, disabling this replication also
+   * have region replication &gt; 1. If this is enabled once, disabling this replication also
    * requires disabling the replication peer using shell or ReplicationAdmin java class.
   * Replication to secondary region replicas works over standard inter-cluster replication.
   * So replication, if disabled explicitly, also has to be enabled by setting "hbase.replication"

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index 621c200..5ac8c11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -295,9 +295,9 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
   }
   
   /**
-   * Wait for sequence number is assigned & return the assigned value
+   * Wait until the sequence number is assigned &amp; return the assigned value
    * @return long the new assigned sequence number
-   * @throws InterruptedException
+   * @throws IOException
    */
   @Override
   public long getSequenceId() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
index fb769c0..c6bc690 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
@@ -142,11 +142,11 @@ public class ZKSplitLog {
    */
 
   /**
-   * check if /hbase/recovering-regions/<current region encoded name> exists. Returns true if exists
-   * and set watcher as well.
+   * check if /hbase/recovering-regions/&lt;current region encoded name&gt;
+   * exists. Returns true if it exists and sets a watcher as well.
    * @param zkw
   * @param regionEncodedName region encoded name
-   * @return true when /hbase/recovering-regions/<current region encoded name> exists
+   * @return true when /hbase/recovering-regions/&lt;current region encoded name&gt; exists
    * @throws KeeperException
    */
   public static boolean
@@ -199,7 +199,7 @@ public class ZKSplitLog {
    * @param zkw
    * @param serverName
    * @param encodedRegionName
-   * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName<code>
+   * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName</code>
    * @throws IOException
    */
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index daf320c..7b0ca04 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -676,7 +676,6 @@ public class ThriftServerRunner implements Runnable {
      *          name of table
      * @return Table object
      * @throws IOException
-     * @throws IOError
      */
     public Table getTable(final byte[] tableName) throws
         IOException {
@@ -718,7 +717,7 @@ public class ThriftServerRunner implements Runnable {
 
     /**
      * Removes the scanner associated with the specified ID from the internal
-     * id->scanner hash-map.
+     * id-&gt;scanner hash-map.
      *
      * @param id
      * @return a Scanner, or null if ID was invalid.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
index bb9e58c..db48a62 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
@@ -564,8 +564,6 @@ public class Hbase {
      * 
      * @throws IllegalArgument if ScannerID is invalid
      * 
-     * @throws NotFound when the scanner reaches the end
-     * 
      * @param id id of a scanner returned by scannerOpen
      */
     public List<TRowResult> scannerGet(int id) throws IOError, IllegalArgument, org.apache.thrift.TException;
@@ -580,8 +578,6 @@ public class Hbase {
      * 
      * @throws IllegalArgument if ScannerID is invalid
      * 
-     * @throws NotFound when the scanner reaches the end
-     * 
      * @param id id of a scanner returned by scannerOpen
      * 
      * @param nbRows number of results to return


[2/3] hbase git commit: HBASE-13569 Correct Javadoc (for Java8)

Posted by bu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
index f6f89b4..ad14f67 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -41,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * servers on same hostname and port (startcode is usually timestamp of server startup). The
  * {@link #toString()} format of ServerName is safe to use in the  filesystem and as znode name
  * up in ZooKeeper.  Its format is:
- * <code>&lt;hostname> '{@link #SERVERNAME_SEPARATOR}' &lt;port> '{@link #SERVERNAME_SEPARATOR}' &lt;startcode></code>.
+ * <code>&lt;hostname&gt; '{@link #SERVERNAME_SEPARATOR}' &lt;port&gt;
+ * '{@link #SERVERNAME_SEPARATOR}' &lt;startcode&gt;</code>.
  * For example, if hostname is <code>www.example.org</code>, port is <code>1234</code>,
  * and the startcode for the regionserver is <code>1212121212</code>, then
  * the {@link #toString()} would be <code>www.example.org,1234,1212121212</code>.
@@ -224,7 +225,7 @@ public class ServerName implements Comparable<ServerName>, Serializable {
    * @param port
    * @param startcode
    * @return Server name made of the concatenation of hostname, port and
-   * startcode formatted as <code>&lt;hostname> ',' &lt;port> ',' &lt;startcode></code>
+   * startcode formatted as <code>&lt;hostname&gt; ',' &lt;port&gt; ',' &lt;startcode&gt;</code>
    */
   static String getServerName(String hostName, int port, long startcode) {
     final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13);
@@ -237,10 +238,10 @@ public class ServerName implements Comparable<ServerName>, Serializable {
   }
 
   /**
-   * @param hostAndPort String in form of &lt;hostname> ':' &lt;port>
+   * @param hostAndPort String in form of &lt;hostname&gt; ':' &lt;port&gt;
    * @param startcode
    * @return Server name made of the concatenation of hostname, port and
-   * startcode formatted as <code>&lt;hostname> ',' &lt;port> ',' &lt;startcode></code>
+   * startcode formatted as <code>&lt;hostname&gt; ',' &lt;port&gt; ',' &lt;startcode&gt;</code>
    */
   public static String getServerName(final String hostAndPort,
       final long startcode) {
@@ -339,7 +340,7 @@ public class ServerName implements Comparable<ServerName>, Serializable {
 
   /**
    * @param str Either an instance of {@link ServerName#toString()} or a
-   * "'<hostname>' ':' '<port>'".
+   * "'&lt;hostname&gt;' ':' '&lt;port&gt;'".
    * @return A ServerName instance.
    */
   public static ServerName parseServerName(final String str) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index 0781e1c..63066b3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -126,8 +126,8 @@ public final class TableName implements Comparable<TableName> {
    * The name may not start with '.' or '-'.
    *
    * Valid fully qualified table names:
-   * foo:bar, namespace=>foo, table=>bar
-   * org:foo.bar, namespace=org, table=>foo.bar
+   * foo:bar, namespace=&gt;foo, table=&gt;bar
+   * org:foo.bar, namespace=&gt;org, table=&gt;foo.bar
    */
   public static byte [] isLegalFullyQualifiedTableName(final byte[] tableName) {
     if (tableName == null || tableName.length <= 0) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 34f1bf7..c6406f2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 /**
  * Accepts a stream of Cells. This can be used to build a block of cells during compactions
  * and flushes, or to build a byte[] to send to the client. This could be backed by a
- * List<KeyValue>, but more efficient implementations will append results to a
+ * List&lt;KeyValue&gt;, but more efficient implementations will append results to a
  * byte[] to eliminate overhead, and possibly encode the cells further.
  * <p>To read Cells, use {@link org.apache.hadoop.hbase.CellScanner}
  * @see org.apache.hadoop.hbase.CellScanner

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
index 8352e4e..ad1c984 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 /**
  * Represents an interval of version timestamps.
  * <p>
- * Evaluated according to minStamp <= timestamp < maxStamp
+ * Evaluated according to minStamp &lt;= timestamp &lt; maxStamp
  * or [minStamp,maxStamp) in interval notation.
  * <p>
  * Only used internally; should not be accessed directly by clients.

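The half-open interval above reduces to a one-line check (a sketch; TimeRange's
actual method and field names may differ):

    // Sketch: true iff minStamp <= timestamp < maxStamp.
    public class TimeRangeSketch {
      static boolean withinTimeRange(long minStamp, long maxStamp, long ts) {
        return minStamp <= ts && ts < maxStamp;
      }
      public static void main(String[] args) {
        System.out.println(withinTimeRange(0L, 10L, 10L)); // false, maxStamp excluded
      }
    }
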
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
index 62167d6..2d58a18 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * on the local filesystem. It is configured with a URI passed in as a String
  * to init(). The URI should have the form:
  * <p>
- * <pre>    scheme://path?option1=value1&option2=value2</pre>
+ * <pre>    scheme://path?option1=value1&amp;option2=value2</pre>
  * <p>
  * <i>scheme</i> can be either "jks" or "jceks", specifying the file based
  * providers shipped with every JRE. The latter is the certificate store for

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index e1da695..1bef221 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -61,7 +61,7 @@ public class ThrottledInputStream extends InputStream {
     rawStream.close();
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read() throws IOException {
     throttle();
@@ -72,7 +72,7 @@ public class ThrottledInputStream extends InputStream {
     return data;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read(byte[] b) throws IOException {
     throttle();
@@ -83,7 +83,7 @@ public class ThrottledInputStream extends InputStream {
     return readLen;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
     throttle();
@@ -159,7 +159,7 @@ public class ThrottledInputStream extends InputStream {
     return totalSleepTime;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public String toString() {
     return "ThrottledInputStream{" +

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
index 9ca0964..4a3d42f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
@@ -45,7 +45,7 @@ public interface Dictionary {
    * 
    * @param data the byte array that we're looking up
    * @param offset Offset into <code>data</code> to add to Dictionary.
-   * @param length Length beyond <code>offset</code> that comprises entry; must be > 0.
+   * @param length Length beyond <code>offset</code> that comprises entry; must be &gt; 0.
    * @return the index of the entry, or {@link #NOT_IN_DICTIONARY} if not found
    */
   short findEntry(byte[] data, int offset, int length);
@@ -59,7 +59,7 @@ public interface Dictionary {
    * 
    * @param data the entry to add
    * @param offset Offset into <code>data</code> to add to Dictionary.
-   * @param length Length beyond <code>offset</code> that comprises entry; must be > 0.
+   * @param length Length beyond <code>offset</code> that comprises entry; must be &gt; 0.
    * @return the index of the entry
    */
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
index db71e8c..0efb402 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
@@ -194,7 +194,6 @@ public abstract class User {
    * @param action
    * @return the result of the action
    * @throws IOException
-   * @throws InterruptedException
    */
   @SuppressWarnings({ "rawtypes", "unchecked" })
   public static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
index 4ba15ec..550088a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * scenario where the end of the buffer has been reached but there are still
  * nullable fields remaining in the {@code Struct} definition. When this
  * happens, it will produce null entries for the remaining values. For example:
+ * </p>
  * <pre>
  * StructBuilder builder = new StructBuilder()
  *     .add(OrderedNumeric.ASCENDING) // nullable
@@ -57,11 +58,10 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * Object[] val = new Object[] { BigDecimal.ONE, "foo" };
  * shorter.encode(buf1, val); // write short value with short Struct
  * buf1.setPosition(0); // reset position marker, prepare for read
- * longer.decode(buf1); // => { BigDecimal.ONE, "foo", null } ; long Struct reads implied null
+ * longer.decode(buf1); // =&gt; { BigDecimal.ONE, "foo", null } ; long Struct reads implied null
  * longer.encode(buf2, val); // write short value with long struct
- * Bytes.equals(buf1.getBytes(), buf2.getBytes()); // => true; long Struct skips writing null
+ * Bytes.equals(buf1.getBytes(), buf2.getBytes()); // =&gt; true; long Struct skips writing null
  * </pre>
- * </p>
  * <h3>Sort Order</h3>
  * <p>
  * {@code Struct} instances sort according to the composite order of their

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
index cd41658..b151b89 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
@@ -33,7 +33,7 @@ public abstract class AbstractByteRange implements ByteRange {
   // reuse objects of this class
 
   /**
-   * The array containing the bytes in this range. It will be >= length.
+   * The array containing the bytes in this range. It will be &gt;= length.
    */
   protected byte[] bytes;
 
@@ -44,7 +44,7 @@ public abstract class AbstractByteRange implements ByteRange {
   protected int offset;
 
   /**
-   * The number of bytes in the range. Offset + length must be <= bytes.length
+   * The number of bytes in the range. Offset + length must be &lt;= bytes.length
    */
   protected int length;
 

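Stated in code, the two invariants documented above are (a sketch, not part of
AbstractByteRange):

    // Sketch: the documented invariants of a byte range.
    public class ByteRangeInvariantSketch {
      static boolean isValid(byte[] bytes, int offset, int length) {
        return bytes != null
            && offset >= 0 && length >= 0
            && bytes.length >= length           // backing array >= length
            && offset + length <= bytes.length; // range fits in the array
      }
    }
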
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
index fce0d40..31fb1f5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
@@ -37,7 +37,7 @@ public class Addressing {
   public static final String HOSTNAME_PORT_SEPARATOR = ":";
 
   /**
-   * @param hostAndPort Formatted as <code>&lt;hostname> ':' &lt;port></code>
+   * @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
    * @return An InetSocketInstance
    */
   public static InetSocketAddress createInetSocketAddressFromHostAndPortStr(
@@ -50,7 +50,7 @@ public class Addressing {
    * @param port Server port
    * @return Returns a concatenation of <code>hostname</code> and
    * <code>port</code> in following
-   * form: <code>&lt;hostname> ':' &lt;port></code>.  For example, if hostname
+   * form: <code>&lt;hostname&gt; ':' &lt;port&gt;</code>.  For example, if hostname
    * is <code>example.org</code> and port is 1234, this method will return
    * <code>example.org:1234</code>
    */
@@ -59,7 +59,7 @@ public class Addressing {
   }
 
   /**
-   * @param hostAndPort Formatted as <code>&lt;hostname> ':' &lt;port></code>
+   * @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
    * @return The hostname portion of <code>hostAndPort</code>
    */
   public static String parseHostname(final String hostAndPort) {
@@ -71,7 +71,7 @@ public class Addressing {
   }
 
   /**
-   * @param hostAndPort Formatted as <code>&lt;hostname> ':' &lt;port></code>
+   * @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
    * @return The port portion of <code>hostAndPort</code>
    */
   public static int parsePort(final String hostAndPort) {

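For illustration, parsing a hostAndPort of the form above (a sketch; the real
Addressing methods validate their input):

    public class HostAndPortSketch {
      public static void main(String[] args) {
        String hostAndPort = "example.org:1234";
        int idx = hostAndPort.indexOf(':');
        String hostname = hostAndPort.substring(0, idx);             // "example.org"
        int port = Integer.parseInt(hostAndPort.substring(idx + 1)); // 1234
        System.out.println(hostname + ":" + port);
      }
    }
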
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
index d1f4f20..a22133d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * </p>
  * <ul>
  *   <li>v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
- *     when using very small files (~< 40 bytes).</li>
+ *     when using very small files (~&lt; 40 bytes).</li>
  *   <li>v2.2 - Added some helper methods for encoding/decoding directly from
  *     one file to the next. Also added a main() method to support command
  *     line encoding/decoding from one file to the next. Also added these

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
index 88b728f..d547db1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * </p>
  * <p>
  * This interface differs from ByteBuffer:
+ * </p>
+ * <ul>
  * <li>On-heap bytes only</li>
  * <li>Raw {@code byte} access only; does not encode other primitives.</li>
  * <li>Implements {@code equals(Object)}, {@code #hashCode()}, and
@@ -46,7 +48,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * <li>Can be reused in tight loops like a major compaction which can save
  * significant amounts of garbage. (Without reuse, we throw off garbage like
  * <a href="http://www.youtube.com/watch?v=lkmBH-MjZF4">this thing</a>.)</li>
- * </p>
+ * </ul>
  * <p>
  * Mutable, and always evaluates {@code #equals(Object)}, {@code #hashCode()},
  * and {@code #compareTo(ByteRange)} based on the current contents.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 5d45260..683b559 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -1377,7 +1377,7 @@ public class Bytes implements Comparable<Bytes> {
    * @param offset Offset into array at which vint begins.
    * @throws java.io.IOException e
    * @return deserialized long from buffer.
-   * @deprecated Use {@link #readAsVLong(byte[], int)} instead.
+   * @deprecated Use {@link #readAsVLong(byte[],int)} instead.
    */
   @Deprecated
   public static long readVLong(final byte [] buffer, final int offset)
@@ -1409,7 +1409,7 @@ public class Bytes implements Comparable<Bytes> {
   /**
    * @param left left operand
    * @param right right operand
-   * @return 0 if equal, < 0 if left is less than right, etc.
+   * @return 0 if equal, &lt; 0 if left is less than right, etc.
    */
   public static int compareTo(final byte [] left, final byte [] right) {
     return LexicographicalComparerHolder.BEST_COMPARER.
@@ -1425,7 +1425,7 @@ public class Bytes implements Comparable<Bytes> {
    * @param offset2 Where to start comparing in the right buffer
    * @param length1 How much to compare from the left buffer
    * @param length2 How much to compare from the right buffer
-   * @return 0 if equal, < 0 if left is less than right, etc.
+   * @return 0 if equal, &lt; 0 if left is less than right, etc.
    */
   public static int compareTo(byte[] buffer1, int offset1, int length1,
       byte[] buffer2, int offset2, int length2) {
@@ -2213,7 +2213,7 @@ public class Bytes implements Comparable<Bytes> {
    * Bytewise binary increment/deincrement of long contained in byte array
    * on given amount.
    *
-   * @param value - array of bytes containing long (length <= SIZEOF_LONG)
+   * @param value - array of bytes containing long (length &lt;= SIZEOF_LONG)
    * @param amount value will be incremented on (deincremented if negative)
    * @return array of bytes containing incremented long (length == SIZEOF_LONG)
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
index 9f5a88b..77acf9b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -305,7 +305,7 @@ public class ClassSize {
   /**
    * Aligns a number to 8.
    * @param num number to align to 8
-   * @return smallest number >= input that is a multiple of 8
+   * @return smallest number &gt;= input that is a multiple of 8
    */
   public static int align(int num) {
     return (int)(align((long)num));
@@ -314,7 +314,7 @@ public class ClassSize {
   /**
    * Aligns a number to 8.
    * @param num number to align to 8
-   * @return smallest number >= input that is a multiple of 8
+   * @return smallest number &gt;= input that is a multiple of 8
    */
   public static long align(long num) {
     //The 7 comes from that the alignSize is 8 which is the number of bytes

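Since align rounds up to the next multiple of 8, a short worked example (a
sketch consistent with the comment above; the bit trick is one common way to
implement it):

    // Sketch: add 7, then clear the low three bits.
    public class AlignSketch {
      static long align(long num) {
        return (num + 7) & ~7L;
      }
      public static void main(String[] args) {
        System.out.println(align(13)); // 16
        System.out.println(align(16)); // 16, already aligned
      }
    }
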
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
index 17ed7b7..1096a17 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
@@ -27,8 +27,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 public class DefaultEnvironmentEdge implements EnvironmentEdge {
   /**
    * {@inheritDoc}
-   * <p/>
+   * <p>
    * This implementation returns {@link System#currentTimeMillis()}
+   * </p>
    */
   @Override
   public long currentTime() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
index 7b5ecd0..482c5f0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
@@ -45,9 +45,10 @@ public class IncrementingEnvironmentEdge implements EnvironmentEdge {
 
   /**
    * {@inheritDoc}
-   * <p/>
+   * <p>
    * This method increments a known value for the current time each time this
    * method is called. The first value is 1.
+   * </p>
    */
   @Override
   public synchronized long currentTime() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
index 8ee214d..789bd8d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
@@ -66,11 +66,11 @@ public class JenkinsHash extends Hash {
    * <p>The best hash table sizes are powers of 2.  There is no need to do mod
    * a prime (mod is sooo slow!).  If you need less than 32 bits, use a bitmask.
    * For example, if you need only 10 bits, do
-   * <code>h = (h & hashmask(10));</code>
+   * <code>h = (h &amp; hashmask(10));</code>
    * In which case, the hash table should have hashsize(10) elements.
    *
    * <p>If you are hashing n strings byte[][] k, do it like this:
-   * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
+   * for (int i = 0, h = 0; i &lt; n; ++i) h = hash( k[i], h);
    *
    * <p>By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
    * code any way you wish, private, educational, or commercial.  It's free.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
index 2e69291..5398582 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
@@ -36,8 +36,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * A utility class to manage a set of locks. Each lock is identified by a String which serves
  * as a key. Typical usage is: <p>
  * class Example{
- * private final static KeyLocker<String> locker = new Locker<String>();
- * <p/>
+ * private final static KeyLocker&lt;String&gt; locker = new Locker&lt;String&gt;();
+ * </p>
+ * <p>
  * public void foo(String s){
  * Lock lock = locker.acquireLock(s);
  * try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
index 20282ff..499e34c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
@@ -40,7 +40,8 @@ import com.google.common.annotations.VisibleForTesting;
  * Each value is encoded as one or more bytes. The first byte of the encoding,
  * its meaning, and a terse description of the bytes that follow is given by
  * the following table:
- * <table>
+ * </p>
+ * <table summary="Encodings">
  * <tr><th>Content Type</th><th>Encoding</th></tr>
  * <tr><td>NULL</td><td>0x05</td></tr>
  * <tr><td>negative infinity</td><td>0x07</td></tr>
@@ -63,7 +64,6 @@ import com.google.common.annotations.VisibleForTesting;
  * <tr><td>variable length BLOB</td><td>0x35, B</td></tr>
  * <tr><td>byte-for-byte BLOB</td><td>0x36, X</td></tr>
  * </table>
- * </p>
  *
  * <h3>Null Encoding</h3>
  * <p>
@@ -258,8 +258,8 @@ import com.google.common.annotations.VisibleForTesting;
  * values are 5 bytes in length.
  * </p>
  * <p>
- * {@code OrderedBytes} encodings are heavily influenced by the <a href="
- * http://sqlite.org/src4/doc/trunk/www/key_encoding.wiki">SQLite4 Key
+ * {@code OrderedBytes} encodings are heavily influenced by the
+ * <a href="http://sqlite.org/src4/doc/trunk/www/key_encoding.wiki">SQLite4 Key
+ * Encoding</a>. Slight deviations are made in the interest of order
  * correctness and user extensibility. Fixed-width {@code Long} and
  * {@link Double} encodings are based on implementations from the now defunct
@@ -1408,6 +1408,7 @@ public class OrderedBytes {
    * -Double.MIN_VALUE &lt; -0.0 &lt; +0.0; &lt; Double.MIN_VALUE &lt; ...
    * &lt; Double.MAX_VALUE &lt; Double.POSITIVE_INFINITY &lt; Double.NaN
    * </p>
+   * <p>
    * Floating point numbers are encoded as specified in IEEE 754. A 64-bit
    * double precision float consists of a sign bit, 11-bit unsigned exponent
    * encoded in offset-1023 notation, and a 52-bit significand. The format is

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
index 4ec0820..8e7751d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
@@ -68,9 +68,9 @@ public class Sleeper {
   }
 
   /**
-   * Sleep for period adjusted by passed <code>startTime<code>
+   * Sleep for period adjusted by passed <code>startTime</code>
    * @param startTime Time some task started previous to now.  Time to sleep
-   * will be docked current time minus passed <code>startTime<code>.
+   * will be docked the current time minus the passed <code>startTime</code>.
    */
   public void sleep(final long startTime) {
     if (this.stopper.isStopped()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
index 4c14335..3ab783a 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
@@ -102,7 +102,7 @@ public interface BaseSource {
 
   /**
    * Get the name of the context in JMX that this source will be exposed through.
-   * This is in ObjectName format. With the default context being Hadoop -> HBase
+   * This is in ObjectName format. With the default context being Hadoop -&gt; HBase
    */
   String getMetricsJmxContext();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
index f44a445..f703eef 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
@@ -47,9 +47,10 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.io.WritableUtils;
 
 /**
+ * <p>
  * This class is created via reflection in DataBlockEncoding enum. Update the enum if class name or
  * package changes.
- * <p/>
+ * </p>
  * PrefixTreeDataBlockEncoder implementation of DataBlockEncoder. This is the primary entry point
  * for PrefixTree encoding and decoding. Encoding is delegated to instances of
  * {@link PrefixTreeEncoder}, and decoding is delegated to instances of

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
index 73e8ab4..a4b4c353 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
@@ -63,8 +63,9 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   }
 
   /**
+   * <p>
    * Currently unused.
-   * <p/>
+   * </p>
    * TODO performance leak. should reuse the searchers. hbase does not currently have a hook where
    * this can be called
    */
@@ -110,12 +111,13 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   }
 
   /**
+   * <p>
    * Currently unused.
-   * <p/>
+   * </p><p>
    * A nice, lightweight reference, though the underlying cell is transient. This method may return
    * the same reference to the backing PrefixTreeCell repeatedly, while other implementations may
    * return a different reference for each Cell.
-   * <p/>
+   * </p>
    * The goal will be to transition the upper layers of HBase, like Filters and KeyValueHeap, to
    * use this method instead of the getKeyValue() methods above.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
index effad57..f0b249f 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
@@ -25,10 +25,11 @@ import java.util.concurrent.LinkedBlockingQueue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
+ * <p>
  * Pools PrefixTreeArraySearcher objects. Each Searcher can consist of hundreds or thousands of
  * objects and 1 is needed for each HFile during a Get operation. With tens of thousands of
  * Gets/second, reusing these searchers may save a lot of young gen collections.
- * <p/>
+ * </p>
  * Alternative implementation would be a ByteBufferSearcherPool (not implemented yet).
  */
 @InterfaceAudience.Private

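The pooling pattern the ArraySearcherPool javadoc describes is easy to see in a minimal sketch built on the same LinkedBlockingQueue the class already imports. The class name, type parameter, and capacity below are illustrative assumptions, not the real implementation:

import java.util.concurrent.LinkedBlockingQueue;

/** Minimal sketch of the searcher-pooling pattern; not the real ArraySearcherPool. */
public class ArraySearcherPoolSketch<SEARCHER> {
  // Bounded so a burst of Gets cannot grow the pool without limit.
  private final LinkedBlockingQueue<SEARCHER> pool = new LinkedBlockingQueue<>(1000);

  /** Non-blocking take; returns null when empty, and the caller builds a fresh searcher. */
  public SEARCHER checkOut() {
    return pool.poll();
  }

  /** Non-blocking return; the instance is simply discarded when the pool is full. */
  public void checkIn(SEARCHER searcher) {
    pool.offer(searcher);
  }
}

Reusing instances this way is what saves the young-gen collections mentioned above: the hundreds or thousands of objects inside each searcher survive across Gets instead of being reallocated per request.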
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
index ec54c2a..eb0e41f 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
@@ -28,10 +28,11 @@ import org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher;
 import com.google.common.primitives.UnsignedBytes;
 
 /**
+ * <p>
  * Searcher extends the capabilities of the Scanner + ReversibleScanner to add the ability to
  * position itself on a requested Cell without scanning through cells before it. The PrefixTree is
  * set up to be a Trie of rows, so finding a particular row is extremely cheap.
- * <p/>
+ * </p>
  * Once it finds the row, it does a binary search through the cells inside the row, which is not as
  * fast as the trie search, but faster than iterating through every cell like existing block
  * formats
@@ -309,8 +310,8 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
   /****************** complete seek when token mismatch ******************/
 
   /**
-   * @param searcherIsAfterInputKey <0: input key is before the searcher's position<br/>
-   *          >0: input key is after the searcher's position
+   * @param searcherIsAfterInputKey &lt;0: input key is before the searcher's position<br/>
+   *          &gt;0: input key is after the searcher's position
    */
   protected CellScannerPosition fixRowTokenMissReverse(int searcherIsAfterInputKey) {
     if (searcherIsAfterInputKey < 0) {//searcher position is after the input key, so back up
@@ -337,8 +338,8 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
   }
 
   /**
-   * @param searcherIsAfterInputKey <0: input key is before the searcher's position<br/>
-   *                   >0: input key is after the searcher's position
+   * @param searcherIsAfterInputKey &lt;0: input key is before the searcher's position<br>
+   *                   &gt;0: input key is after the searcher's position
    */
   protected CellScannerPosition fixRowTokenMissForward(int searcherIsAfterInputKey) {
     if (searcherIsAfterInputKey < 0) {//searcher position is after the input key

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
index 3e4b75c..926cf30 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
@@ -46,12 +46,12 @@ import org.apache.hadoop.io.WritableUtils;
 /**
  * This is the primary class for converting a CellOutputStream into an encoded byte[]. As Cells are
  * added they are completely copied into the various encoding structures. This is important because
- * usually the cells being fed in during compactions will be transient.<br/>
- * <br/>
- * Usage:<br/>
- * 1) constructor<br/>
- * 4) append cells in sorted order: write(Cell cell)<br/>
- * 5) flush()<br/>
+ * usually the cells being fed in during compactions will be transient.<br>
+ * <br>
+ * Usage:<br>
+ * 1) constructor<br>
+ * 4) append cells in sorted order: write(Cell cell)<br>
+ * 5) flush()<br>
  */
 @InterfaceAudience.Private
 public class PrefixTreeEncoder implements CellOutputStream {
@@ -391,10 +391,11 @@ public class PrefixTreeEncoder implements CellOutputStream {
   }
 
   /**
+   * <p>
    * The following "compile" methods do any intermediate work necessary to transform the cell
    * fragments collected during the writing phase into structures that are ready to write to the
    * outputStream.
-   * <p/>
+   * </p>
    * The family and qualifier treatment is almost identical, as is timestamp and mvccVersion.
    */
 

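The numbered usage list above (constructor, then write(Cell) in sorted order, then flush()) reads in code roughly as follows. This is a hedged sketch: the constructor arguments and the sortedCells collection are assumptions, since neither appears in the hunk:

// Sketch only -- constructor parameters and sortedCells are assumed, not shown above.
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrefixTreeEncoder encoder = new PrefixTreeEncoder(out, /* includeMvccVersion= */ true);
for (Cell cell : sortedCells) {   // cells must arrive already sorted
  encoder.write(cell);            // each cell is fully copied into the encoding structures
}
encoder.flush();                  // compiles the tries and emits the encoded block
byte[] encodedBlock = out.toByteArray();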
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
index c1eb03d..467e7ad 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
@@ -32,14 +32,17 @@ import org.apache.hadoop.hbase.util.vint.UFIntTool;
 import org.apache.hadoop.hbase.util.vint.UVIntTool;
 
 /**
+ * <p>
  * Column nodes can be either family nodes or qualifier nodes, as both sections encode similarly.
  * The family and qualifier sections of the data block are made of 1 or more of these nodes.
- * <p/>
- * Each node is composed of 3 sections:<br/>
+ * </p>
+ * Each node is composed of 3 sections:<br>
+ * <ul>
  * <li>tokenLength: UVInt (normally 1 byte) indicating the number of token bytes
  * <li>token[]: the actual token bytes
  * <li>parentStartPosition: the offset of the next node from the start of the family or qualifier
  * section
+ * </ul>
  */
 @InterfaceAudience.Private
 public class ColumnNodeWriter{

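The three node sections listed above (tokenLength as a UVInt, the token bytes, then the parent offset) can be serialized by a toy writer like the one below. It stands in for UVIntTool with a local varint helper and writes the parent offset as a varint for brevity where the real code uses a fixed-width UFInt; both simplifications are assumptions:

import java.io.IOException;
import java.io.OutputStream;

/** Toy serializer for the 3-section column node layout described above. */
public class ColumnNodeSketch {
  /** Unsigned varint: 7 data bits per byte, low bits first, high bit = continuation. */
  static void writeUVInt(OutputStream os, int value) throws IOException {
    while ((value & ~0x7F) != 0) {
      os.write((value & 0x7F) | 0x80);
      value >>>= 7;
    }
    os.write(value);
  }

  /** tokenLength (UVInt), then token[], then the parent node's offset. */
  static void writeNode(OutputStream os, byte[] token, int parentStartPosition)
      throws IOException {
    writeUVInt(os, token.length);        // section 1: tokenLength
    os.write(token);                     // section 2: token[]
    writeUVInt(os, parentStartPosition); // section 3: parentStartPosition
  }
}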
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
index 3ceae63..b30daf6 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
@@ -34,9 +34,10 @@ import org.apache.hadoop.hbase.util.vint.UFIntTool;
 import com.google.common.collect.Lists;
 
 /**
+ * <p>
  * Takes the tokenized family or qualifier data and flattens it into a stream of bytes. The family
  * section is written after the row section, and qualifier section after family section.
- * <p/>
+ * </p>
  * The family and qualifier tries, or "column tries", are structured differently than the row trie.
  * The trie cannot be reassembled without external data about the offsets of the leaf nodes, and
  * these external pointers are stored in the nubs and leaves of the row trie. For each cell in a
@@ -45,12 +46,13 @@ import com.google.common.collect.Lists;
  * comprises the column name. To assemble the column name, the trie is traversed in reverse (right
  * to left), with the rightmost tokens pointing to the start of their "parent" node which is the
  * node to the left.
- * <p/>
+ * <p>
  * This choice was made to reduce the size of the column trie by storing the minimum amount of
  * offset data. As a result, to find a specific qualifier within a row, you must do a binary search
  * of the column nodes, reassembling each one as you search. Future versions of the PrefixTree might
  * encode the columns in both a forward and reverse trie, which would convert binary searches into
  * more efficient trie searches which would be beneficial for wide rows.
+ * </p>
  */
 @InterfaceAudience.Private
 public class ColumnSectionWriter {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
index 5c184bf..35f264b 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
@@ -188,8 +188,9 @@ public class RowNodeWriter{
    * offsets into the timestamp/column data structures that are written in the middle of the block.
    * We use {@link UFIntTool} to encode these indexes/offsets to allow random access during a binary
    * search of a particular column/timestamp combination.
-   * <p/>
+   * <p>
    * Branch nodes will not have any data in these sections.
+   * </p>
    */
 
   protected void writeFamilyNodeOffsets(OutputStream os) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
index 75a11ad..f44017b 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
@@ -31,10 +31,12 @@ import com.google.common.collect.Lists;
 
 /**
  * Data structure used in the first stage of PrefixTree encoding:
+ * <ul>
  * <li>accepts a sorted stream of ByteRanges
  * <li>splits them into a set of tokens, each held by a {@link TokenizerNode}
  * <li>connects the TokenizerNodes via standard java references
  * <li>keeps a pool of TokenizerNodes and a reusable byte[] for holding all token content
+ * </ul>
  * <p><br>
  * Mainly used for turning Cell rowKeys into a trie, but also used for family and qualifier
  * encoding.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
index e51d5be..7da78a7 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
@@ -35,12 +35,12 @@ import com.google.common.collect.Lists;
  * Individual node in a Trie structure.  Each node is one of 3 types:
  * <li>Branch: an internal trie node that may have a token and must have multiple children, but does
  * not represent an actual input byte[], hence its numOccurrences is 0
- * <li>Leaf: a node with no children and where numOccurrences is >= 1.  It's token represents the
+ * <li>Leaf: a node with no children and where numOccurrences is &gt;= 1.  Its token represents the
  * last bytes in the input byte[]s.
  * <li>Nub: a combination of a branch and leaf.  Its token represents the last bytes of input
- * byte[]s and has numOccurrences >= 1, but it also has child nodes which represent input byte[]s
+ * byte[]s and has numOccurrences &gt;= 1, but it also has child nodes which represent input byte[]s
  * that add bytes to this nodes input byte[].
- * <br/><br/>
+ * <br><br>
  * Example inputs (numInputs=7):
  * 0: AAA
  * 1: AAA
@@ -49,13 +49,13 @@ import com.google.common.collect.Lists;
  * 4: AAB
  * 5: AABQQ
  * 6: AABQQ
- * <br/><br/>
+ * <br><br>
  * Resulting TokenizerNodes:
- * AA <- branch, numOccurrences=0, tokenStartOffset=0, token.length=2
- * A  <- leaf, numOccurrences=2, tokenStartOffset=2, token.length=1
- * B  <- nub, numOccurrences=3, tokenStartOffset=2, token.length=1
- * QQ <- leaf, numOccurrences=2, tokenStartOffset=3, token.length=2
- * <br/><br/>
+ * AA &lt;- branch, numOccurrences=0, tokenStartOffset=0, token.length=2
+ * A  &lt;- leaf, numOccurrences=2, tokenStartOffset=2, token.length=1
+ * B  &lt;- nub, numOccurrences=3, tokenStartOffset=2, token.length=1
+ * QQ &lt;- leaf, numOccurrences=2, tokenStartOffset=3, token.length=2
+ * <br><br>
  * numInputs == 7 == sum(numOccurrences) == 0 + 2 + 3 + 2
  */
 @InterfaceAudience.Private
@@ -236,13 +236,15 @@ public class TokenizerNode{
   /**
    * Called when we need to convert a leaf node into a branch with 2 leaves. Comments inside the
    * method assume we have token BAA starting at tokenStartOffset=0 and are adding BOO. The output
-   * will be 3 nodes:<br/>
-   * <li>1: B <- branch
-   * <li>2: AA <- leaf
-   * <li>3: OO <- leaf
+   * will be 3 nodes:<br>
+   * <ul>
+   * <li>1: B &lt;- branch
+   * <li>2: AA &lt;- leaf
+   * <li>3: OO &lt;- leaf
+   * </ul>
    *
-   * @param numTokenBytesToRetain => 1 (the B)
-   * @param bytes => BOO
+   * @param numTokenBytesToRetain =&gt; 1 (the B)
+   * @param bytes =&gt; BOO
    */
   protected void split(int numTokenBytesToRetain, final ByteRange bytes) {
     int childNodeDepth = nodeDepth;

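The BAA/BOO split documented above comes down to retaining the common prefix as the new branch token. A self-contained toy version (not the real TokenizerNode code) that reproduces the example:

/** Toy reconstruction of the split example: existing token BAA, incoming BOO. */
public class SplitSketch {
  static int commonPrefixLength(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    int i = 0;
    while (i < n && a[i] == b[i]) {
      i++;
    }
    return i;
  }

  public static void main(String[] args) {
    byte[] existing = "BAA".getBytes();
    byte[] incoming = "BOO".getBytes();
    int retain = commonPrefixLength(existing, incoming);  // numTokenBytesToRetain => 1
    System.out.println(new String(existing, 0, retain));                        // B  <- branch
    System.out.println(new String(existing, retain, existing.length - retain)); // AA <- leaf
    System.out.println(new String(incoming, retain, incoming.length - retain)); // OO <- leaf
  }
}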
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
index 7e83457..a3ae097 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
@@ -33,19 +33,22 @@ public interface CellSearcher extends ReversibleCellScanner {
   void resetToBeforeFirstEntry();
 
   /**
+   * <p>
    * Do everything within this scanner's power to find the key. Look forward and backwards.
-   * <p/>
+   * </p>
+   * <p>
    * Abort as soon as we know it can't be found, possibly leaving the Searcher in an invalid state.
-   * <p/>
+   * </p>
    * @param key position the CellScanner exactly on this key
    * @return true if the cell existed and getCurrentCell() holds a valid cell
    */
   boolean positionAt(Cell key);
 
   /**
+   * <p>
    * Same as positionAt(..), but go to the extra effort of finding the previous key if there's no
    * exact match.
-   * <p/>
+   * </p>
    * @param key position the CellScanner on this key or the closest cell before
    * @return AT if exact match<br/>
    *         BEFORE if on last cell before key<br/>
@@ -54,9 +57,10 @@ public interface CellSearcher extends ReversibleCellScanner {
   CellScannerPosition positionAtOrBefore(Cell key);
 
   /**
+   * <p>
    * Same as positionAt(..), but go to the extra effort of finding the next key if there's no exact
    * match.
-   * <p/>
+   * </p>
    * @param key position the CellScanner on this key or the closest cell after
    * @return AT if exact match<br/>
    *         AFTER if on first cell after key<br/>
@@ -65,43 +69,47 @@ public interface CellSearcher extends ReversibleCellScanner {
   CellScannerPosition positionAtOrAfter(Cell key);
 
   /**
+   * <p>
    * Note: Added for backwards compatibility with
    * {@link org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek}
-   * <p/>
+   * </p><p>
    * Look for the key, but only look after the current position. Probably not needed for an
    * efficient tree implementation, but is important for implementations without random access such
    * as unencoded KeyValue blocks.
-   * <p/>
+   * </p>
    * @param key position the CellScanner exactly on this key
    * @return true if getCurrent() holds a valid cell
    */
   boolean seekForwardTo(Cell key);
 
   /**
+   * <p>
    * Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no
    * exact match.
-   * <p/>
+   * </p>
    * @param key
-   * @return AT if exact match<br/>
-   *         AFTER if on first cell after key<br/>
+   * @return AT if exact match<br>
+   *         AFTER if on first cell after key<br>
    *         AFTER_LAST if key was after the last cell in this scanner's scope
    */
   CellScannerPosition seekForwardToOrBefore(Cell key);
 
   /**
+   * <p>
    * Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no
    * exact match.
-   * <p/>
+   * </p>
    * @param key
-   * @return AT if exact match<br/>
-   *         AFTER if on first cell after key<br/>
+   * @return AT if exact match<br>
+   *         AFTER if on first cell after key<br>
    *         AFTER_LAST if key was after the last cell in this scanner's scope
    */
   CellScannerPosition seekForwardToOrAfter(Cell key);
 
   /**
+   * <p>
    * Note: This may not be appropriate to have in the interface.  Need to investigate.
-   * <p/>
+   * </p>
    * Position the scanner in an invalid state after the last cell: CellScannerPosition.AFTER_LAST.
    * This is used by tests and for handling certain edge cases.
    */

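Taken together, the positioning contract above reads in client code roughly like this. The searcher and key variables are hypothetical; only methods and enum values named in the interface's javadoc are used:

// Hedged sketch of the CellSearcher positioning contract.
if (searcher.positionAt(key)) {
  // exact match: getCurrentCell() now holds the requested cell
} else {
  CellScannerPosition pos = searcher.positionAtOrAfter(key);
  switch (pos) {
    case AT:         /* exact match after all */               break;
    case AFTER:      /* on the first cell after the key */     break;
    case AFTER_LAST: /* key sorts past this scanner's scope */ break;
    default:         break;
  }
}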
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
index 3823e7c..c15429b 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
@@ -35,7 +35,7 @@ public interface ReversibleCellScanner extends CellScanner {
   /**
    * Try to position the scanner one Cell before the current position.
    * @return true if the operation was successful, meaning getCurrentCell() will return a valid
-   *         Cell.<br/>
+   *         Cell.<br>
    *         false if there were no previous cells, meaning getCurrentCell() will return null.
    *         Scanner position will be
    *         {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST}
@@ -46,7 +46,7 @@ public interface ReversibleCellScanner extends CellScanner {
    * Try to position the scanner in the row before the current row.
    * @param endOfRow true for the last cell in the previous row; false for the first cell
    * @return true if the operation was successful, meaning getCurrentCell() will return a valid
-   *         Cell.<br/>
+   *         Cell.<br>
    *         false if there were no previous cells, meaning getCurrentCell() will return null.
    *         Scanner position will be
    *         {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST}

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
index fc7c107..a3da9f0 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * This class converts between positive ints and 1-4 bytes that represent the int.  All input ints
  * must be positive.  Max values stored in N bytes are:
  *
- * N=1: 2^8  =>           256
- * N=2: 2^16 =>        65,536
- * N=3: 2^24 =>    16,777,216
- * N=4: 2^31 => 2,147,483,648 (Integer.MAX_VALUE)
+ * N=1: 2^8  =&gt;           256
+ * N=2: 2^16 =&gt;        65,536
+ * N=3: 2^24 =&gt;    16,777,216
+ * N=4: 2^31 =&gt; 2,147,483,648 (Integer.MAX_VALUE)
  *
  * This was created to get most of the memory savings of a variable length integer when encoding
  * an array of input integers, but to fix the number of bytes for each integer to the number needed

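The N=1..4 table above pins down the whole scheme: pick one byte width for the entire array from its largest value, then store every int in exactly that many bytes. A minimal sketch (the byte order here is an assumption; the real UFIntTool may differ):

/** Toy fixed-width int codec matching the N=1..4 table above. */
public class UFIntSketch {
  /** Smallest width in bytes able to hold maxValue (1..4). */
  static int widthFor(int maxValue) {
    if (maxValue < (1 << 8))  return 1;
    if (maxValue < (1 << 16)) return 2;
    if (maxValue < (1 << 24)) return 3;
    return 4;  // up to Integer.MAX_VALUE
  }

  /** Write value big-endian into exactly width bytes. */
  static void write(byte[] dest, int offset, int width, int value) {
    for (int i = width - 1; i >= 0; i--) {
      dest[offset + i] = (byte) value;
      value >>>= 8;
    }
  }

  /** Read a big-endian value of width bytes. */
  static int read(byte[] src, int offset, int width) {
    int v = 0;
    for (int i = 0; i < width; i++) {
      v = (v << 8) | (src[offset + i] & 0xFF);
    }
    return v;
  }
}

Fixing the width per array is what keeps random access cheap: element k always starts at offset k * width, which a variable-length encoding cannot offer.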
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
index dd4095b..aeebd2c 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
@@ -42,7 +42,7 @@ public class UVIntTool {
   public static final byte[]
     MAX_VALUE_BYTES = new byte[] { -1, -1, -1, -1, 7 };
 
-  /********************* int -> bytes **************************/
+  /********************* int -&gt; bytes **************************/
 
   public static int numBytes(int in) {
     if (in == 0) {
@@ -79,7 +79,7 @@ public class UVIntTool {
     return numBytes;
   }
 
-  /******************** bytes -> int **************************/
+  /******************** bytes -&gt; int **************************/
 
   public static int getInt(byte[] bytes) {
     return getInt(bytes, 0);

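The MAX_VALUE_BYTES constant above ({ -1, -1, -1, -1, 7 }) is consistent with a base-128 varint: 7 data bits per byte with a set high bit meaning more bytes follow, since four 0xFF bytes plus a final 7 carry exactly the 31 bits of Integer.MAX_VALUE. A sketch under that reading (protobuf-style low-bits-first order, which is an assumption; the constant alone does not fix the byte order):

import java.io.ByteArrayOutputStream;

/** Toy unsigned varint codec consistent with MAX_VALUE_BYTES above. */
public class UVIntSketch {
  static byte[] encode(int in) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    while ((in & ~0x7F) != 0) {
      out.write((in & 0x7F) | 0x80);  // 7 data bits, continuation bit set
      in >>>= 7;
    }
    out.write(in);                    // final byte, continuation bit clear
    return out.toByteArray();
  }

  static int decode(byte[] bytes, int offset) {
    int value = 0;
    int shift = 0;
    while (true) {
      int b = bytes[offset++] & 0xFF;
      value |= (b & 0x7F) << shift;
      if ((b & 0x80) == 0) {
        return value;
      }
      shift += 7;
    }
  }

  public static void main(String[] args) {
    // Integer.MAX_VALUE round-trips through { -1, -1, -1, -1, 7 }.
    System.out.println(decode(encode(Integer.MAX_VALUE), 0) == Integer.MAX_VALUE);
  }
}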
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
index b2437a8..b55e0f6 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
@@ -43,7 +43,7 @@ public class UVLongTool{
     MAX_VALUE_BYTES = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, 127 };
 
 
-  /********************* long -> bytes **************************/
+  /********************* long -&gt; bytes **************************/
 
   public static int numBytes(long in) {// do a check for illegal arguments if not protected
     if (in == 0) {
@@ -77,7 +77,7 @@ public class UVLongTool{
     return numBytes;
   }
 
-  /******************** bytes -> long **************************/
+  /******************** bytes -&gt; long **************************/
 
   public static long getLong(byte[] bytes) {
     return getLong(bytes, 0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 00a12eb..13de210 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -50,9 +50,9 @@ import com.google.protobuf.ByteString;
  * the return is a set of sub-procedures or null in case the procedure doesn't
  * have sub-procedures. Once the sub-procedures are successfully completed
  * the execute() method is called again, you should think at it as a stack:
- *  -> step 1
- *  ---> step 2
- *  -> step 1
+ *  -&gt; step 1
+ *  ---&gt; step 2
+ *  -&gt; step 1
  *
  * rollback() is called when the procedure or one of the sub-procedures is failed.
  * the rollback step is supposed to cleanup the resources created during the

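The step-1/step-2/step-1 picture above is just a stack discipline: a step that returns sub-procedures is left on the stack and re-executed once they finish. A self-contained toy model makes that visible; this is NOT the hbase-procedure2 API, only an illustration:

import java.util.ArrayDeque;
import java.util.Deque;

/** Toy model only; deliberately much simpler than the real Procedure class. */
interface ToyProcedure {
  /** Sub-procedures to run first, or null when this step is complete. */
  ToyProcedure[] execute();
}

public class ProcedureStackSketch {
  public static void run(ToyProcedure root) {
    Deque<ToyProcedure> stack = new ArrayDeque<>();
    stack.push(root);
    while (!stack.isEmpty()) {
      ToyProcedure top = stack.peek();        //  -> step 1
      ToyProcedure[] subs = top.execute();
      if (subs == null) {
        stack.pop();                          // step finished for good
      } else {
        for (ToyProcedure sub : subs) {
          stack.push(sub);                    //  ---> step 2 runs before step 1 resumes
        }
      }
    }
  }
}

A step must remember that it already spawned its children and return null when re-executed, otherwise the loop above would spawn them forever.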
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
index bcb0424..636a037 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProc
 
 /**
  * A SequentialProcedure describes one step in a procedure chain.
- *   -> Step 1 -> Step 2 -> Step 3
+ *   -&gt; Step 1 -&gt; Step 2 -&gt; Step 3
  *
  * The main difference from a base Procedure is that the execute() of a
  * SequentialProcedure will be called only once, there will be no second
@@ -79,4 +79,4 @@ public abstract class SequentialProcedure<TEnvironment> extends Procedure<TEnvir
     SequentialProcedureData data = SequentialProcedureData.parseDelimitedFrom(stream);
     executed = data.getExecuted();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
index 933a6e2..4ffd590 100644
--- a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
+++ b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
@@ -37,6 +37,8 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
 
   /**
    * Wraps a byte array in a {@link ByteString} without copying it.
+   * @param array array to be wrapped
+   * @return wrapped array
    */
   public static ByteString wrap(final byte[] array) {
     return new LiteralByteString(array);
@@ -44,6 +46,10 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
 
   /**
    * Wraps a subset of a byte array in a {@link ByteString} without copying it.
+   * @param array array to be wrapped
+   * @param offset from
+   * @param length length
+   * @return wrapped array
    */
   public static ByteString wrap(final byte[] array, int offset, int length) {
     return new BoundedByteString(array, offset, length);

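Given the two signatures above, usage is a one-liner each way; buf and its contents are hypothetical:

byte[] buf = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };  // hypothetical data
ByteString whole = HBaseZeroCopyByteString.wrap(buf);               // all 12 bytes, no copy
ByteString slice = HBaseZeroCopyByteString.wrap(buf, 4, 8);         // bytes 4..11, no copy
// Because nothing is copied, any later mutation of buf is visible through
// both ByteStrings -- the caller effectively gives up ownership of the array.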
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
index 23d26dc..d1216f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
@@ -99,7 +99,7 @@ public interface InterProcessLock {
   /**
    * Visits the locks (both held and attempted) of this type with the given
    * {@link MetadataHandler}.
-   * @throws InterruptedException If there is an unrecoverable error
+   * @throws IOException If there is an unrecoverable error
    */
   void visitLocks(MetadataHandler handler) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
index 880875f..3258cbb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
@@ -38,7 +38,7 @@ public class HFileArchiveTableMonitor {
    * Set the tables to be archived. Internally adds each table and attempts to
    * register it.
    * <p>
-   * <b>Note: All previous tables will be removed in favor of these tables.<b>
+   * <b>Note: All previous tables will be removed in favor of these tables.</b>
    * @param tables add each of the tables to be archived.
    */
   public synchronized void setArchiveTables(List<String> tables) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
index 31746b6..42da0ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  *  {@link org.apache.hadoop.hbase.client.Put}.
  * <p>Does <b>NOT</b> attempt the
  *  {@link org.apache.hadoop.hbase.client.Put} multiple times, 
- *  since the constraint <it>should</it> fail every time for 
+ *  since the constraint <b>should</b> fail every time for
  *  the same {@link org.apache.hadoop.hbase.client.Put} (it should be
  * idempotent).
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
index 9bffc5c..6729f7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
@@ -19,7 +19,6 @@
 /**
  * Restrict the domain of a data attribute, often times to fulfill business rules/requirements.
  *
- <p>
  <h2> Table of Contents</h2>
  <ul>
  <li><a href="#overview">Overview</a></li>
@@ -27,7 +26,6 @@
  <li><a href="#caveats">Caveats</a></li>
  <li><a href="#usage">Example Usage</a></li>
  </ul>
- </p>
 
  <h2><a name="overview">Overview</a></h2>
  Constraints are used to enforce business rules in a database.
@@ -127,9 +125,9 @@
  public class IntegerConstraint extends BaseConstraint {
  public void check(Put p) throws ConstraintException {
 
- Map&ltbyte[], List&ltKeyValue&gt&gt familyMap = p.getFamilyMap();
+ Map&lt;byte[], List&lt;KeyValue&gt;&gt; familyMap = p.getFamilyMap();
 
- for (List &ltKeyValue&gt kvs : familyMap.values()) {
+ for (List &lt;KeyValue&gt; kvs : familyMap.values()) {
  for (KeyValue kv : kvs) {
 
  // just make sure that we can actually pull out an int

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 917df5b..67fe96a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -137,7 +137,7 @@ public interface SplitLogManagerCoordination {
    * It removes recovering regions from Coordination
    * @param serverNames servers which are just recovered
    * @param isMetaRecovery whether current recovery is for the meta region on
-   *          <code>serverNames<code>
+   *          <code>serverNames</code>
    */
   void removeRecoveringRegions(Set<String> serverNames, Boolean isMetaRecovery) throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 6619eaa..7925cb0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -290,7 +290,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
    * region server hosting the region can allow reads to the recovered region
    * @param recoveredServerNameSet servers which are just recovered
    * @param isMetaRecovery whether current recovery is for the meta region on
-   *          <code>serverNames<code>
+   *          <code>serverNames</code>
    */
   @Override
   public void removeRecoveringRegions(final Set<String> recoveredServerNameSet,
@@ -684,8 +684,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 
   /**
    * ZooKeeper implementation of
-   * {@link org.apache.hadoop.hbase.coordination.
-   * SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
+   * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
    */
   @Override
   public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
index 81c933b..cc78626 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
@@ -51,11 +51,11 @@ import com.google.protobuf.Service;
  * {@link ColumnInterpreter} is used to interpret column value. This class is
  * parameterized with the following (these are the types with which the {@link ColumnInterpreter}
  * is parameterized, and for more description on these, refer to {@link ColumnInterpreter}):
- * @param <T> Cell value data type
- * @param <S> Promoted data type
- * @param <P> PB message that is used to transport initializer specific bytes
- * @param <Q> PB message that is used to transport Cell (<T>) instance
- * @param <R> PB message that is used to transport Promoted (<S>) instance
+ * @param T Cell value data type
+ * @param S Promoted data type
+ * @param P PB message that is used to transport initializer specific bytes
+ * @param Q PB message that is used to transport Cell (&lt;T&gt;) instance
+ * @param R PB message that is used to transport Promoted (&lt;S&gt;) instance
  */
 @InterfaceAudience.Private
 public class AggregateImplementation<T, S, P extends Message, Q extends Message, R extends Message> 
@@ -229,7 +229,6 @@ extends AggregateService implements CoprocessorService, Coprocessor {
   /**
    * Gives the row count for the given column family and column qualifier, in
    * the given row range as defined in the Scan object.
-   * @throws IOException
    */
   @Override
   public void getRowNum(RpcController controller, AggregateRequest request,

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
index 3e5acc2..e771a92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
@@ -52,14 +52,14 @@ import com.google.protobuf.Service;
  *
  * Defines a protocol to perform multi row transactions.
  * See {@link MultiRowMutationEndpoint} for the implementation.
- * </br>
+ * <br>
  * See
  * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)}
  * for details and limitations.
- * </br>
+ * <br>
  * Example:
- * <code><pre>
- * List<Mutation> mutations = ...;
+ * <code>
+ * List&lt;Mutation&gt; mutations = ...;
  * Put p1 = new Put(row1);
  * Put p2 = new Put(row2);
  * ...
@@ -73,7 +73,7 @@ import com.google.protobuf.Service;
  *    MultiRowMutationService.newBlockingStub(channel);
  * MutateRowsRequest mrm = mrmBuilder.build();
  * service.mutateRows(null, mrm);
- * </pre></code>
+ * </code>
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 507a1bb..93eb5f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -344,7 +344,7 @@ public interface RegionObserver extends Coprocessor {
    * (e.getRegion() returns the parent region)
    * @throws IOException if an error occurred on the coprocessor
    * @deprecated Use preSplit(
-   *    final ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow)
+   *    final ObserverContext&lt;RegionCoprocessorEnvironment&gt; c, byte[] splitRow)
    */
   @Deprecated
   void preSplit(final ObserverContext<RegionCoprocessorEnvironment> c) throws IOException;
@@ -1068,7 +1068,8 @@ public interface RegionObserver extends Coprocessor {
    * <li>
    * <code>boolean filterRow()</code> returning true</li>
    * <li>
-   * <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from the passed List</li>
+   * <code>void filterRow(List&lt;KeyValue&gt; kvs)</code> removing all the kvs
+   * from the passed List</li>
    * </ol>
    * @param c the environment provided by the region server
    * @param s the scanner
@@ -1095,7 +1096,8 @@ public interface RegionObserver extends Coprocessor {
    * <li>
    * <code>boolean filterRow()</code> returning true</li>
    * <li>
-   * <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from the passed List</li>
+   * <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from
+   * the passed List</li>
    * </ol>
    * @param c the environment provided by the region server
    * @param s the scanner

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
index c4777e1..d175aff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
@@ -62,7 +62,7 @@ when the corresponding events happen. The master transitions regions
 through the following states:
 <p>
 &nbsp;&nbsp;&nbsp;
-unassigned -> pendingOpen -> open -> pendingClose -> closed.
+unassigned -&gt; pendingOpen -&gt; open -&gt; pendingClose -&gt; closed.
 <p>
 Coprocessors have opportunity to intercept and handle events in
 pendingOpen, open, and pendingClose states.
@@ -75,7 +75,7 @@ can piggyback or fail this process.
 <p>
 <ul>
   <li>preOpen, postOpen: Called before and after the region is reported as
- online to the master.</li><p>
+ online to the master.</li>
 </ul>
 <p>
 <h3>Open</h3>
@@ -85,9 +85,9 @@ split, etc.). Coprocessors can piggyback administrative actions via:
 <p>
 <ul>
   <li>preFlush, postFlush: Called before and after the memstore is flushed
-  into a new store file.</li><p>
-  <li>preCompact, postCompact: Called before and after compaction.</li><p>
-  <li>preSplit, postSplit: Called after the region is split.</li><p>
+  into a new store file.</li>
+  <li>preCompact, postCompact: Called before and after compaction.</li>
+  <li>preSplit, postSplit: Called after the region is split.</li>
 </ul>
 <p>
 <h3>PendingClose</h3>
@@ -99,7 +99,7 @@ an indication to this effect will be passed as an argument.
 <p>
 <ul>
   <li>preClose and postClose: Called before and after the region is
-  reported as closed to the master.</li><p>
+  reported as closed to the master.</li>
 </ul>
 <p>
 
@@ -109,23 +109,23 @@ observe and mediate client actions on the region:
 <p>
 <ul>
   <li>preGet, postGet: Called before and after a client makes a Get
-  request.</li><p>
+  request.</li>
   <li>preExists, postExists: Called before and after the client tests
-  for existence using a Get.</li><p>
+  for existence using a Get.</li>
   <li>prePut and postPut: Called before and after the client stores a value.
-  </li><p>
+  </li>
   <li>preDelete and postDelete: Called before and after the client
-  deletes a value.</li><p>
+  deletes a value.</li>
   <li>preScannerOpen postScannerOpen: Called before and after the client
-  opens a new scanner.</li><p>
+  opens a new scanner.</li>
   <li>preScannerNext, postScannerNext: Called before and after the client
-  asks for the next row on a scanner.</li><p>
+  asks for the next row on a scanner.</li>
   <li>preScannerClose, postScannerClose: Called before and after the client
-  closes a scanner.</li><p>
+  closes a scanner.</li>
   <li>preCheckAndPut, postCheckAndPut: Called before and after the client
-  calls checkAndPut().</li><p>
+  calls checkAndPut().</li>
   <li>preCheckAndDelete, postCheckAndDelete: Called before and after the client
-  calls checkAndDelete().</li><p>
+  calls checkAndDelete().</li>
 </ul>
 You can also extend abstract class <code>BaseRegionObserverCoprocessor</code>
 which
@@ -245,7 +245,7 @@ recognize and load it.
 </div>
 <p>
 &lt;path&gt; must point to a jar, can be on any filesystem supported by the
-Hadoop </code>FileSystem</code> object.
+Hadoop <code>FileSystem</code> object.
 <p>
 &lt;class&gt; is the coprocessor implementation class. A jar can contain
 more than one coprocessor implementation, but only one can be specified
@@ -270,7 +270,7 @@ policy implementations, perhaps) ahead of observers.
     ":" + Coprocessor.Priority.USER);
   HBaseAdmin admin = new HBaseAdmin(this.conf);
   admin.createTable(htd);
-
+</pre></blockquote>
 <h3>Chain of RegionObservers</h3>
 As described above, multiple coprocessors can be loaded at one region at the
 same time. In case of RegionObserver, you can have more than one
@@ -278,8 +278,6 @@ RegionObservers register to one same hook point, i.e, preGet(), etc.
 When a region reach the
 hook point, the framework will invoke each registered RegionObserver by the
 order of assigned priority.
-
-</pre></blockquote>
 </div>
 
 */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
index 750f87c..746c59b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
@@ -38,7 +38,8 @@ public class TimeoutException extends Exception {
    * Exception indicating that an operation attempt has timed out
    * @param start time the operation started (ms since epoch)
    * @param end time the timeout was triggered (ms since epoch)
-   * @param expected expected amount of time for the operation to complete (ms) (ideally, expected <= end-start)
+   * @param expected expected amount of time for the operation to complete (ms)
+   *                 (ideally, expected &lt;= end-start)
    */
   public TimeoutException(String sourceName, long start, long end, long expected) {
     super("Timeout elapsed! Source:" + sourceName + " Start:" + start + ", End:" + end

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 84c3548..4ce2d94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -94,9 +94,9 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
  * Create a Jetty embedded server to answer http requests. The primary goal
  * is to serve up status information for the server.
  * There are three contexts:
- *   "/logs/" -> points to the log directory
- *   "/static/" -> points to common static files (src/webapps/static)
- *   "/" -> the jsp server code from (src/webapps/<name>)
+ *   "/logs/" -&gt; points to the log directory
+ *   "/static/" -&gt; points to common static files (src/webapps/static)
+ *   "/" -&gt; the jsp server code from (src/webapps/<name>)
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -425,7 +425,7 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
+   * The jsp scripts are taken from src/webapps/&lt;name&gt;.
    * @param name The name of the server
    * @param port The port to use on the server
    * @param findPort whether the server should start at the given port and
@@ -1108,13 +1108,14 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Checks the user has privileges to access to instrumentation servlets.
-   * <p/>
+   * <p>
    * If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
    * (default value) it always returns TRUE.
-   * <p/>
+   * </p><p>
    * If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
    * it will check that if the current user is in the admin ACLS. If the user is
    * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+   * </p>
    *
    * @param servletContext the servlet context.
    * @param request the servlet request.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
index 7549a3e..e4a971a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
@@ -16,9 +16,8 @@
  * limitations under the License.
  */
 /**
- * </ul>
  * <p>
- * Copied from hadoop source code.<br/>
+ * Copied from hadoop source code.<br>
  * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why.
  * </p>
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
index 1c5a593..3caf67f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
@@ -91,7 +91,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 public class FileLink {
   private static final Log LOG = LogFactory.getLog(FileLink.class);
 
-  /** Define the Back-reference directory name prefix: .links-<hfile>/ */
+  /** Define the Back-reference directory name prefix: .links-&lt;hfile&gt;/ */
   public static final String BACK_REFERENCES_DIRECTORY_PREFIX = ".links-";
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index ff33951..c17720c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -401,7 +401,6 @@ public class HFileLink extends FileLink {
    * @param rootDir root hbase directory
    * @param linkRefPath Link Back Reference path
    * @return full path of the referenced hfile
-   * @throws IOException on unexpected error.
    */
   public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
     Pair<TableName, String> p = parseBackReferenceName(linkRefPath.getName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
index fc5bd5d..344d496 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
 /**
  * WALLink describes a link to a WAL.
  *
- * An wal can be in /hbase/.logs/<server>/<wal>
- * or it can be in /hbase/.oldlogs/<wal>
+ * A WAL can be in /hbase/.logs/&lt;server&gt;/&lt;wal&gt;
+ * or it can be in /hbase/.oldlogs/&lt;wal&gt;
  *
  * The link checks first in the original path,
  * if it is not present it fallbacks to the archived path.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 35458a2..d18dada 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -119,11 +119,12 @@ import com.google.common.base.Preconditions;
  * File is made of data blocks followed by meta data blocks (if any), a fileinfo
  * block, data block index, meta data block index, and a fixed size trailer
  * which records the offsets at which file changes content type.
- * <pre>&lt;data blocks>&lt;meta blocks>&lt;fileinfo>&lt;data index>&lt;meta index>&lt;trailer></pre>
+ * <pre>&lt;data blocks&gt;&lt;meta blocks&gt;&lt;fileinfo&gt;&lt;
+ * data index&gt;&lt;meta index&gt;&lt;trailer&gt;</pre>
  * Each block has a bit of magic at its start.  Block are comprised of
  * key/values.  In data blocks, they are both byte arrays.  Metadata blocks are
  * a String key and a byte array value.  An empty file looks like this:
- * <pre>&lt;fileinfo>&lt;trailer></pre>.  That is, there are not data nor meta
+ * <pre>&lt;fileinfo&gt;&lt;trailer&gt;</pre>.  That is, there are not data nor meta
  * blocks present.
  * <p>
  * TODO: Do scanners need to be able to take a start and end row?


[3/3] hbase git commit: HBASE-13569 Correct Javadoc (for Java8)

Posted by bu...@apache.org.
HBASE-13569 Correct Javadoc (for Java8)

Signed-off-by: Sean Busbey <bu...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/682b8ab8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/682b8ab8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/682b8ab8

Branch: refs/heads/master
Commit: 682b8ab8a542a903e5807053282693e3a96bad2d
Parents: 293506c
Author: Gábor Lipták <gl...@gmail.com>
Authored: Sun Apr 26 21:07:45 2015 -0400
Committer: Sean Busbey <bu...@apache.org>
Committed: Sat Jun 13 01:10:37 2015 -0500

----------------------------------------------------------------------
 .../apache/hadoop/hbase/HColumnDescriptor.java  |  6 +--
 .../org/apache/hadoop/hbase/HRegionInfo.java    |  7 ++-
 .../apache/hadoop/hbase/HTableDescriptor.java   |  2 +-
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  4 +-
 .../hbase/client/AbstractClientScanner.java     |  4 +-
 .../org/apache/hadoop/hbase/client/Admin.java   | 12 ++---
 .../client/ClientSmallReversedScanner.java      |  3 +-
 .../apache/hadoop/hbase/client/Durability.java  |  4 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  3 +-
 .../hadoop/hbase/client/HTableInterface.java    |  2 +-
 .../hadoop/hbase/client/HTableMultiplexer.java  |  3 --
 .../org/apache/hadoop/hbase/client/Put.java     |  2 +-
 .../org/apache/hadoop/hbase/client/Result.java  | 10 ++---
 .../hadoop/hbase/client/ResultScanner.java      |  2 +-
 .../hadoop/hbase/client/RetryingCallable.java   |  4 +-
 .../RpcRetryingCallerWithReadReplicas.java      |  3 +-
 .../org/apache/hadoop/hbase/client/Scan.java    | 11 ++---
 .../client/coprocessor/AggregationClient.java   | 41 ++++++++---------
 .../hadoop/hbase/client/package-info.java       |  5 ---
 .../hbase/coprocessor/ColumnInterpreter.java    | 31 ++++++-------
 .../hadoop/hbase/filter/FamilyFilter.java       |  7 +--
 .../org/apache/hadoop/hbase/filter/Filter.java  |  1 -
 .../apache/hadoop/hbase/filter/FilterBase.java  | 18 ++++----
 .../apache/hadoop/hbase/filter/FilterList.java  |  8 ++--
 .../hbase/filter/MultiRowRangeFilter.java       |  2 +-
 .../apache/hadoop/hbase/filter/PageFilter.java  |  4 +-
 .../apache/hadoop/hbase/filter/SkipFilter.java  |  4 +-
 .../apache/hadoop/hbase/ipc/AsyncRpcClient.java |  1 -
 .../apache/hadoop/hbase/ipc/ConnectionId.java   |  2 +-
 .../hadoop/hbase/ipc/ServerRpcController.java   |  3 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  6 +--
 .../hadoop/hbase/quotas/QuotaTableUtil.java     | 10 ++---
 .../hadoop/hbase/regionserver/BloomType.java    |  2 +-
 .../hbase/replication/ReplicationPeers.java     |  2 +-
 .../security/visibility/CellVisibility.java     |  7 +--
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |  3 --
 .../hbase/zookeeper/ZooKeeperWatcher.java       |  1 -
 .../main/java/org/apache/hadoop/hbase/Cell.java | 47 +++++++++++---------
 .../org/apache/hadoop/hbase/CellComparator.java |  4 +-
 .../org/apache/hadoop/hbase/CellScannable.java  |  2 +-
 .../org/apache/hadoop/hbase/CellScanner.java    |  2 +-
 .../org/apache/hadoop/hbase/HConstants.java     |  3 +-
 .../java/org/apache/hadoop/hbase/KeyValue.java  | 27 ++++++-----
 .../org/apache/hadoop/hbase/KeyValueUtil.java   |  2 +-
 .../org/apache/hadoop/hbase/ServerName.java     | 11 ++---
 .../java/org/apache/hadoop/hbase/TableName.java |  4 +-
 .../hadoop/hbase/io/CellOutputStream.java       |  2 +-
 .../org/apache/hadoop/hbase/io/TimeRange.java   |  2 +-
 .../hbase/io/crypto/KeyStoreKeyProvider.java    |  2 +-
 .../io/hadoopbackport/ThrottledInputStream.java |  8 ++--
 .../apache/hadoop/hbase/io/util/Dictionary.java |  4 +-
 .../org/apache/hadoop/hbase/security/User.java  |  1 -
 .../org/apache/hadoop/hbase/types/Struct.java   |  6 +--
 .../hadoop/hbase/util/AbstractByteRange.java    |  4 +-
 .../apache/hadoop/hbase/util/Addressing.java    |  8 ++--
 .../org/apache/hadoop/hbase/util/Base64.java    |  2 +-
 .../org/apache/hadoop/hbase/util/ByteRange.java |  4 +-
 .../org/apache/hadoop/hbase/util/Bytes.java     |  8 ++--
 .../org/apache/hadoop/hbase/util/ClassSize.java |  4 +-
 .../hbase/util/DefaultEnvironmentEdge.java      |  3 +-
 .../hbase/util/IncrementingEnvironmentEdge.java |  3 +-
 .../apache/hadoop/hbase/util/JenkinsHash.java   |  4 +-
 .../org/apache/hadoop/hbase/util/KeyLocker.java |  5 ++-
 .../apache/hadoop/hbase/util/OrderedBytes.java  |  9 ++--
 .../org/apache/hadoop/hbase/util/Sleeper.java   |  4 +-
 .../apache/hadoop/hbase/metrics/BaseSource.java |  2 +-
 .../hbase/codec/prefixtree/PrefixTreeCodec.java |  3 +-
 .../codec/prefixtree/PrefixTreeSeeker.java      |  8 ++--
 .../prefixtree/decode/ArraySearcherPool.java    |  3 +-
 .../decode/PrefixTreeArraySearcher.java         | 11 ++---
 .../prefixtree/encode/PrefixTreeEncoder.java    | 15 ++++---
 .../encode/column/ColumnNodeWriter.java         |  7 ++-
 .../encode/column/ColumnSectionWriter.java      |  6 ++-
 .../prefixtree/encode/row/RowNodeWriter.java    |  3 +-
 .../prefixtree/encode/tokenize/Tokenizer.java   |  2 +
 .../encode/tokenize/TokenizerNode.java          | 32 ++++++-------
 .../codec/prefixtree/scanner/CellSearcher.java  | 34 ++++++++------
 .../scanner/ReversibleCellScanner.java          |  4 +-
 .../hadoop/hbase/util/vint/UFIntTool.java       |  8 ++--
 .../hadoop/hbase/util/vint/UVIntTool.java       |  4 +-
 .../hadoop/hbase/util/vint/UVLongTool.java      |  4 +-
 .../hadoop/hbase/procedure2/Procedure.java      |  6 +--
 .../hbase/procedure2/SequentialProcedure.java   |  4 +-
 .../protobuf/HBaseZeroCopyByteString.java       |  6 +++
 .../apache/hadoop/hbase/InterProcessLock.java   |  2 +-
 .../example/HFileArchiveTableMonitor.java       |  2 +-
 .../hbase/constraint/ConstraintException.java   |  2 +-
 .../hadoop/hbase/constraint/package-info.java   |  6 +--
 .../SplitLogManagerCoordination.java            |  2 +-
 .../ZKSplitLogManagerCoordination.java          |  5 +--
 .../coprocessor/AggregateImplementation.java    | 11 +++--
 .../coprocessor/MultiRowMutationEndpoint.java   | 10 ++---
 .../hbase/coprocessor/RegionObserver.java       |  8 ++--
 .../hadoop/hbase/coprocessor/package-info.java  | 36 +++++++--------
 .../hbase/errorhandling/TimeoutException.java   |  3 +-
 .../apache/hadoop/hbase/http/HttpServer.java    | 13 +++---
 .../apache/hadoop/hbase/http/package-info.java  |  3 +-
 .../org/apache/hadoop/hbase/io/FileLink.java    |  2 +-
 .../org/apache/hadoop/hbase/io/HFileLink.java   |  1 -
 .../org/apache/hadoop/hbase/io/WALLink.java     |  4 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |  5 ++-
 .../hadoop/hbase/io/hfile/HFileBlock.java       | 12 ++---
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |  2 +-
 .../hadoop/hbase/io/hfile/HFileScanner.java     | 18 ++++----
 .../hadoop/hbase/io/hfile/LruBlockCache.java    |  2 +-
 .../hbase/io/hfile/LruCachedBlockQueue.java     |  2 +-
 .../hadoop/hbase/io/hfile/package-info.java     |  2 +-
 .../apache/hadoop/hbase/ipc/RpcCallContext.java |  2 +-
 .../hbase/mapreduce/MultiTableInputFormat.java  |  2 +-
 .../hbase/mapreduce/TableMapReduceUtil.java     |  9 ++--
 .../hbase/mapreduce/TableRecordReaderImpl.java  |  3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  2 +-
 .../hadoop/hbase/master/RegionStateStore.java   |  2 +-
 .../master/balancer/SimpleLoadBalancer.java     |  4 +-
 .../master/balancer/StochasticLoadBalancer.java |  4 +-
 .../hbase/regionserver/ColumnTracker.java       | 14 +++---
 .../hbase/regionserver/CompactionRequestor.java |  4 +-
 .../hbase/regionserver/DefaultMemStore.java     |  2 +-
 .../hbase/regionserver/DeleteTracker.java       |  7 +--
 .../regionserver/ExplicitColumnTracker.java     |  7 +--
 .../hbase/regionserver/HeapMemoryManager.java   |  2 +-
 ...IncreasingToUpperBoundRegionSplitPolicy.java |  4 +-
 .../hadoop/hbase/regionserver/LruHashMap.java   |  6 +--
 .../hbase/regionserver/MemStoreChunkPool.java   |  2 +-
 .../hbase/regionserver/MetricsRegionServer.java |  3 +-
 .../MiniBatchOperationInProgress.java           |  2 +-
 .../NonReversedNonLazyKeyValueScanner.java      |  2 +-
 .../hbase/regionserver/RSRpcServices.java       |  8 ++--
 .../hadoop/hbase/regionserver/Region.java       |  3 +-
 .../hbase/regionserver/RegionScanner.java       |  4 +-
 .../hbase/regionserver/ScanDeleteTracker.java   |  7 +--
 .../apache/hadoop/hbase/regionserver/Store.java |  7 +--
 .../hadoop/hbase/regionserver/StoreScanner.java |  2 +-
 .../hbase/regionserver/wal/ReplayHLogKey.java   |  1 -
 .../hadoop/hbase/regionserver/wal/WALEdit.java  | 14 +++---
 .../regionserver/wal/WALEditsReplaySink.java    |  4 +-
 .../replication/HBaseReplicationEndpoint.java   |  1 -
 .../HBaseInterClusterReplicationEndpoint.java   |  3 +-
 .../regionserver/ReplicationSink.java           |  7 +--
 .../regionserver/ReplicationSource.java         |  4 +-
 .../regionserver/ReplicationSourceManager.java  |  2 +
 .../regionserver/ReplicationThrottler.java      |  2 +-
 .../security/access/AccessControlLists.java     |  1 +
 .../hbase/security/access/AccessController.java |  2 +
 .../security/visibility/VisibilityUtils.java    |  2 +-
 .../snapshot/SnapshotDescriptionUtils.java      | 14 +++---
 .../hadoop/hbase/util/BloomFilterWriter.java    |  2 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  9 ++--
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  2 +-
 .../hadoop/hbase/util/HFileV1Detector.java      |  6 ++-
 .../hadoop/hbase/util/MultiHConnection.java     |  1 -
 .../hadoop/hbase/util/RegionSplitter.java       | 14 +++---
 .../hbase/util/ServerRegionReplicaUtil.java     |  2 +-
 .../org/apache/hadoop/hbase/wal/WALKey.java     |  4 +-
 .../hadoop/hbase/zookeeper/ZKSplitLog.java      |  8 ++--
 .../hadoop/hbase/thrift/ThriftServerRunner.java |  3 +-
 .../hadoop/hbase/thrift/generated/Hbase.java    |  4 --
 158 files changed, 510 insertions(+), 458 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 20cfbef..3da018a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -85,8 +85,8 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   /**
    * Key for cache data into L1 if cache is set up with more than one tier.
    * To set in the shell, do something like this:
-   * <code>hbase(main):003:0> create 't',
-   *    {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}</code>
+   * <code>hbase(main):003:0&gt; create 't',
+   *    {NAME =&gt; 't', CONFIGURATION =&gt; {CACHE_DATA_IN_L1 =&gt; 'true'}}</code>
    */
   public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
 
@@ -115,7 +115,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   /**
    * Retain all cells across flushes and compactions even if they fall behind
    * a delete tombstone. To see all retained cells, do a 'raw' scan; see
-   * Scan#setRaw or pass RAW => true attribute in the shell.
+   * Scan#setRaw or pass RAW =&gt; true attribute in the shell.
    */
   public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
   public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
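
A minimal sketch of setting this from Java rather than the shell, assuming
the setCacheDataInL1 setter present on this branch; the family name "cf" is
hypothetical:

  import org.apache.hadoop.hbase.HColumnDescriptor;

  public class CacheDataInL1Example {
    static HColumnDescriptor l1CachedFamily() {
      HColumnDescriptor cf = new HColumnDescriptor("cf");
      // Equivalent to CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'} in the shell
      cf.setCacheDataInL1(true);
      return cf;
    }
  }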

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index adca3d7..c134063 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -54,17 +54,21 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * about the region.
  *
  * The region has a unique name which consists of the following fields:
+ * <ul>
  * <li> tableName   : The name of the table </li>
  * <li> startKey    : The startKey for the region. </li>
  * <li> regionId    : A timestamp when the region is created. </li>
  * <li> replicaId   : An id starting from 0 to differentiate replicas of the same region range
  * but hosted in separated servers. The same region range can be hosted in multiple locations.</li>
  * <li> encodedName : An MD5 encoded string for the region name.</li>
+ * </ul>
  *
  * <br> Other than the fields in the region name, region info contains:
+ * <ul>
  * <li> endKey      : the endKey for the region (exclusive) </li>
  * <li> split       : Whether the region is split </li>
  * <li> offline     : Whether the region is offline </li>
+ * </ul>
  *
  * In 0.98 or before, a list of table's regions would fully cover the total keyspace, and at any
  * point in time, a row key always belongs to a single region, which is hosted in a single server.
@@ -647,7 +651,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
    * by this region. For example, if the region is foo,a,g and this is
    * passed ["b","c"] or ["a","c"] it will return true, but if this is passed
    * ["b","z"] it will return false.
-   * @throws IllegalArgumentException if the range passed is invalid (ie end < start)
+   * @throws IllegalArgumentException if the range passed is invalid (ie. end &lt; start)
    */
   public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
     if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) {
@@ -1098,7 +1102,6 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
    * @param r Result to pull from
    * @return A pair of the {@link HRegionInfo} and the {@link ServerName}
    * (or null for server address if no address set in hbase:meta).
-   * @throws IOException
    * @deprecated use MetaTableAccessor methods for interacting with meta layouts
    */
   @Deprecated
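
A short sketch of the containsRange contract described above, for a region
whose key range is [a, g); the keys are the ones from the javadoc example:

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ContainsRangeExample {
    static void check(HRegionInfo fooAtoG) {
      boolean in = fooAtoG.containsRange(Bytes.toBytes("b"), Bytes.toBytes("c"));  // true
      boolean out = fooAtoG.containsRange(Bytes.toBytes("b"), Bytes.toBytes("z")); // false
      // containsRange(Bytes.toBytes("c"), Bytes.toBytes("b")) would throw
      // IllegalArgumentException, since end < start.
      assert in && !out;
    }
  }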

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index f2f0077..58067ea 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -179,7 +179,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
 
   /**
    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
-   * for read-replicas (CONSISTENCY => TIMELINE).
+   * for read-replicas (CONSISTENCY =&gt; TIMELINE).
    */
   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
   private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index c53f998..86e8d46 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -180,7 +180,7 @@ public class MetaTableAccessor {
     }
   }
 
-  /** The delimiter for meta columns for replicaIds > 0 */
+  /** The delimiter for meta columns for replicaIds &gt; 0 */
   protected static final char META_REPLICA_ID_DELIMITER = '_';
 
   /** A regex for parsing server columns from meta. See above javadoc for meta layout */
@@ -1080,7 +1080,7 @@ public class MetaTableAccessor {
   /**
    * Fetch table states from META table
    * @param conn connection to use
-   * @return map {tableName -> state}
+   * @return map {tableName -&gt; state}
    * @throws IOException
    */
   public static Map<TableName, TableState> getTableStates(Connection conn)
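
A minimal usage sketch for the map shape documented above
({tableName -&gt; state}), assuming the master-branch TableState client class:

  import java.io.IOException;
  import java.util.Map;
  import org.apache.hadoop.hbase.MetaTableAccessor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.TableState;

  public class TableStatesExample {
    static void printStates(Connection conn) throws IOException {
      Map<TableName, TableState> states = MetaTableAccessor.getTableStates(conn);
      for (Map.Entry<TableName, TableState> e : states.entrySet()) {
        System.out.println(e.getKey() + " -> " + e.getValue());
      }
    }
  }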

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
index dc325a3..7658faf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
@@ -54,11 +54,11 @@ public abstract class AbstractClientScanner implements ResultScanner {
   }
 
   /**
-   * Get <param>nbRows</param> rows.
+   * Get nbRows rows.
    * How many RPCs are made is determined by the {@link Scan#setCaching(int)}
    * setting (or hbase.client.scanner.caching in hbase-site.xml).
    * @param nbRows number of rows to return
-   * @return Between zero and <param>nbRows</param> RowResults.  Scan is done
+   * @return Between zero and nbRows Results.  Scan is done
    * if returned array is of zero-length (We never return null).
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f00d6c9..fcc0cae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -199,7 +199,7 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param desc table descriptor for table
    * @throws IllegalArgumentException if the table name is reserved
-   * @throws MasterNotRunningException if master is not running
+   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
    * threads, the table may have been created between test-for-existence and attempt-at-creation).
    * @throws IOException if a remote or network exception occurs
@@ -218,7 +218,7 @@ public interface Admin extends Abortable, Closeable {
    * @param endKey end of key range
    * @param numRegions the total number of regions to create
    * @throws IllegalArgumentException if the table name is reserved
-   * @throws MasterNotRunningException if master is not running
+   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
    * threads, the table may have been created between test-for-existence and attempt-at-creation).
    * @throws IOException
@@ -235,7 +235,7 @@ public interface Admin extends Abortable, Closeable {
    * @param splitKeys array of split keys for the initial regions of the table
    * @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated
    * and if the split key has empty byte array.
-   * @throws MasterNotRunningException if master is not running
+   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
    * threads, the table may have been created between test-for-existence and attempt-at-creation).
    * @throws IOException
@@ -248,11 +248,11 @@ public interface Admin extends Abortable, Closeable {
    * It may throw ExecutionException if there was an error while executing the operation
    * or TimeoutException in case the wait timeout was not long enough to allow the
    * operation to complete.
+   * Throws IllegalArgumentException on a bad table name, if the split keys
+   *    are repeated, or if a split key is an empty byte array.
    *
    * @param desc table descriptor for table
    * @param splitKeys keys to check if the table has been created with all split keys
-   * @throws IllegalArgumentException Bad table name, if the split keys
-   *    are repeated and if the split key has empty byte array.
    * @throws IOException if a remote or network exception occurs
    * @return the result of the async creation. You can use Future.get(long, TimeUnit)
    *    to wait on the operation to complete.
@@ -727,7 +727,7 @@ public interface Admin extends Abortable, Closeable {
    * @param destServerName The servername of the destination regionserver.  If passed the empty byte
    * array we'll assign to a random server.  A server name is made of host, port and startcode.
    * Here is an example: <code> host187.example.com,60020,1289493121758</code>
-   * @throws UnknownRegionException Thrown if we can't find a region named
+   * @throws IOException if we can't find a region named
    * <code>encodedRegionName</code>
    */
   void move(final byte[] encodedRegionName, final byte[] destServerName)
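
A sketch of synchronous table creation with split keys, where the conditions
above apply (repeated or empty split keys throw IllegalArgumentException);
the table name "t" and family "cf" are hypothetical:

  import java.io.IOException;
  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateTableExample {
    static void createWithSplits(Admin admin) throws IOException {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("t"));
      desc.addFamily(new HColumnDescriptor("cf"));
      byte[][] splitKeys = { Bytes.toBytes("m") }; // two regions: [,m) and [m,)
      admin.createTable(desc, splitKeys);
    }
  }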

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
index 28502dc..5fac93a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
@@ -39,10 +39,11 @@ import org.apache.hadoop.hbase.util.Bytes;
 import com.google.common.annotations.VisibleForTesting;
 
 /**
+ * <p>
  * Client scanner for small reversed scan. Generally, only one RPC is called to fetch the
  * scan results, unless the results cross multiple regions or the row count of
  * results exceed the caching.
- * <p/>
+ * </p>
  * For small scan, it will get better performance than {@link ReversedClientScanner}
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java
index bc9a4ed..9b35e04 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java
@@ -47,13 +47,13 @@ public enum Durability {
    * Write the Mutation to the WAL synchronously.
    * The data is flushed to the filesystem implementation, but not necessarily to disk.
    * For HDFS this will flush the data to the designated number of DataNodes.
-   * See <a href="https://issues.apache.org/jira/browse/HADOOP-6313">HADOOP-6313<a/>
+   * See <a href="https://issues.apache.org/jira/browse/HADOOP-6313">HADOOP-6313</a>
    */
   SYNC_WAL,
   /**
    * Write the Mutation to the WAL synchronously and force the entries to disk.
    * (Note: this is currently not supported and will behave identical to {@link #SYNC_WAL})
-   * See <a href="https://issues.apache.org/jira/browse/HADOOP-6313">HADOOP-6313<a/>
+   * See <a href="https://issues.apache.org/jira/browse/HADOOP-6313">HADOOP-6313</a>
    */
   FSYNC_WAL
 }
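
A sketch of choosing one of these levels per mutation via the
Mutation#setDurability setter; row, family and qualifier names are
hypothetical:

  import org.apache.hadoop.hbase.client.Durability;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  public class DurabilityExample {
    static Put syncWalPut() {
      Put p = new Put(Bytes.toBytes("row1"));
      p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      p.setDurability(Durability.SYNC_WAL); // flushed to the FS, not necessarily to disk
      return p;
    }
  }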

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 7047714..a06fb2c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -231,7 +231,8 @@ public class HBaseAdmin implements Admin {
    * The connection to master will be created when required by admin functions.
    *
    * @param connection The Connection instance to use
-   * @throws MasterNotRunningException, ZooKeeperConnectionException are not
+   * @throws MasterNotRunningException
+   * @throws ZooKeeperConnectionException are not
    *  thrown anymore but kept into the interface for backward api compatibility
    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
index 8436307..745c770 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
@@ -126,7 +126,7 @@ public interface HTableInterface extends Table {
    * Executes all the buffered {@link Put} operations.
    * <p>
    * This method gets called once automatically for every {@link Put} or batch
-   * of {@link Put}s (when <code>put(List<Put>)</code> is used) when
+   * of {@link Put}s (when <code>put(List&lt;Put&gt;)</code> is used) when
    * {@link #isAutoFlush} is {@code true}.
    * @throws IOException if a remote or network exception occurs.
    * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator#flush()}
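
A sketch of the replacement pattern named in that deprecation note, using
BufferedMutator in place of HTableInterface#flushCommits; the table name is
hypothetical:

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.BufferedMutator;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Put;

  public class BufferedMutatorExample {
    static void writeBuffered(Connection conn, Put put) throws IOException {
      try (BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t"))) {
        mutator.mutate(put); // buffered client-side
        mutator.flush();     // explicit flush of the buffer
      }
    }
  }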

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 10308da..b1f5b9f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -121,7 +121,6 @@ public class HTableMultiplexer {
    * @param tableName
    * @param put
    * @return true if the request can be accepted by its corresponding buffer queue.
-   * @throws IOException
    */
   public boolean put(TableName tableName, final Put put) {
     return put(tableName, put, this.retryNum);
@@ -133,7 +132,6 @@ public class HTableMultiplexer {
    * @param tableName
    * @param puts
    * @return the list of puts which could not be queued
-   * @throws IOException
    */
   public List<Put> put(TableName tableName, final List<Put> puts) {
     if (puts == null)
@@ -169,7 +167,6 @@ public class HTableMultiplexer {
    * retried before dropping the request.
    * Return false if the queue is already full.
    * @return true if the request can be accepted by its corresponding buffer queue.
-   * @throws IOException
    */
   public boolean put(final TableName tableName, final Put put, int retry) {
     if (retry <= 0) {
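
A usage sketch for the non-throwing put described above; the boolean return
value, not an exception, signals whether the update was queued. This assumes
the (Configuration, perRegionServerBufferQueueSize) constructor:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.HTableMultiplexer;
  import org.apache.hadoop.hbase.client.Put;

  public class MultiplexerExample {
    static boolean tryQueue(Configuration conf, TableName table, Put put)
        throws IOException {
      HTableMultiplexer mux = new HTableMultiplexer(conf, 1000); // per-RS queue size
      return mux.put(table, put); // false if the corresponding buffer queue is full
    }
  }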

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index c895eb4..717ea3f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -328,7 +328,7 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
 
   /**
    * A convenience method to determine if this object's familyMap contains
-   * a value assigned to the given family & qualifier.
+   * a value assigned to the given family &amp; qualifier.
    * Both given arguments must match the KeyValue object to return true.
    *
    * @param family column family
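
A short sketch of the family and qualifier check described above, assuming
Put#has; names are hypothetical:

  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  public class PutHasExample {
    static boolean hasCell() {
      Put p = new Put(Bytes.toBytes("row1"));
      p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      return p.has(Bytes.toBytes("cf"), Bytes.toBytes("q")); // true; both must match
    }
  }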

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index 666069c..702983b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -204,7 +204,7 @@ public class Result implements CellScannable, CellScanner {
   /**
    * Return the array of Cells backing this Result instance.
    *
-   * The array is sorted from smallest -> largest using the
+   * The array is sorted from smallest -&gt; largest using the
    * {@link CellComparator#COMPARATOR}.
    *
    * The array only contains what your Get or Scan specifies and no more.
@@ -601,7 +601,7 @@ public class Result implements CellScannable, CellScanner {
    * Map of families to all versions of its qualifiers and values.
    * <p>
    * Returns a three level Map of the form:
-   * <code>Map&amp;family,Map&lt;qualifier,Map&lt;timestamp,value>>></code>
+   * <code>Map&lt;family,Map&lt;qualifier,Map&lt;timestamp,value&gt;&gt;&gt;</code>
    * <p>
    * Note: All other map returning methods make use of this map internally.
    * @return map from families to qualifiers to versions
@@ -643,7 +643,7 @@ public class Result implements CellScannable, CellScanner {
   /**
    * Map of families to their most recent qualifiers and values.
    * <p>
-   * Returns a two level Map of the form: <code>Map&amp;family,Map&lt;qualifier,value>></code>
+   * Returns a two level Map of the form: <code>Map&lt;family,Map&lt;qualifier,value&gt;&gt;</code>
    * <p>
    * The most recent version of each qualifier will be used.
    * @return map from families to qualifiers and value
@@ -675,7 +675,7 @@ public class Result implements CellScannable, CellScanner {
   /**
    * Map of qualifiers to values.
    * <p>
-   * Returns a Map of the form: <code>Map&lt;qualifier,value></code>
+   * Returns a Map of the form: <code>Map&lt;qualifier,value&gt;</code>
    * @param family column family to get
    * @return map of qualifiers to values
    */
@@ -945,4 +945,4 @@ public class Result implements CellScannable, CellScanner {
       throw new UnsupportedOperationException("Attempting to modify readonly EMPTY_RESULT!");
     }
   }
-}
\ No newline at end of file
+}
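
A sketch walking the three-level map documented above
(family -&gt; qualifier -&gt; timestamp -&gt; value):

  import java.util.NavigableMap;
  import org.apache.hadoop.hbase.client.Result;

  public class ResultMapExample {
    static int countVersions(Result r) {
      NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
      if (map == null) {
        return 0; // empty Result
      }
      int n = 0;
      for (NavigableMap<byte[], NavigableMap<Long, byte[]>> qualifiers : map.values()) {
        for (NavigableMap<Long, byte[]> versions : qualifiers.values()) {
          n += versions.size(); // one entry per retained version
        }
      }
      return n;
    }
  }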

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
index 6b7f1dd..d3efbda 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
@@ -42,7 +42,7 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
 
   /**
    * @param nbRows number of rows to return
-   * @return Between zero and <param>nbRows</param> Results
+   * @return Between zero and nbRows results
    * @throws IOException e
    */
   Result [] next(int nbRows) throws IOException;
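
A usage sketch for batched iteration, relying on the zero-length-array
convention documented above; the Table handle is assumed to exist:

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;

  public class BatchedScanExample {
    static void scanInBatches(Table table) throws IOException {
      try (ResultScanner scanner = table.getScanner(new Scan())) {
        Result[] batch;
        while ((batch = scanner.next(100)).length > 0) {
          // process up to 100 Results; a zero-length array means the scan is done
        }
      }
    }
  }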

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java
index e468d3c..ea65fcf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java
@@ -24,7 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
- * A Callable<T> that will be retried.  If {@link #call(int)} invocation throws exceptions,
+ * A Callable&lt;T&gt; that will be retried.  If {@link #call(int)} invocation throws exceptions,
  * we will call {@link #throwable(Throwable, boolean)} with whatever the exception was.
  * @param <T>
  */
@@ -42,7 +42,7 @@ public interface RetryingCallable<T> {
    * make it so we succeed on next call (clear caches, do relookup of locations, etc.).
    * @param t
    * @param retrying True if we are in retrying mode (we are not in retrying mode when max
-   * retries == 1; we ARE in retrying mode if retries > 1 even when we are the last attempt)
+   * retries == 1; we ARE in retrying mode if retries &gt; 1 even when we are the last attempt)
    */
   void throwable(final Throwable t, boolean retrying);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 8f28881..d610d8c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -174,6 +174,7 @@ public class RpcRetryingCallerWithReadReplicas {
   }
 
   /**
+   * <p>
    * Algo:
    * - we put the query into the execution pool.
    * - after x ms, if we don't have a result, we add the queries for the secondary replicas
@@ -186,7 +187,7 @@ public class RpcRetryingCallerWithReadReplicas {
  * - a call is a thread. Let's not multiply the number of threads by the number of replicas.
    * Server side, if we can cancel when it's still in the handler pool, it's much better, as a call
    * can take some i/o.
-   * <p/>
+   * </p>
    * Globally, the number of retries, timeout and so on still applies, but it's per replica,
    * not global. We continue until all retries are done, or all timeouts are exceeded.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index a0193fb..14b721b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -727,10 +727,10 @@ public class Scan extends Query {
    * this can deliver huge perf gains when there's a cf with lots of data; however, it can
    * also lead to some inconsistent results, as follows:
    * - if someone does a concurrent update to both column families in question you may get a row
-   *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } }
-   *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan
-   *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 },
-   *   { video => "my dog" } }.
+   *   that never existed, e.g. for { rowKey = 5, { cat_videos =&gt; 1 }, { video =&gt; "my cat" } }
+   *   someone puts rowKey 5 with { cat_videos =&gt; 0 }, { video =&gt; "my dog" }, concurrent scan
+   *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos =&gt; 1 },
+   *   { video =&gt; "my dog" } }.
    * - if there's a concurrent split and you have more than 2 column families, some rows may be
    *   missing some column families.
    */
@@ -982,7 +982,6 @@ public class Scan extends Query {
     return ProtobufUtil.toScanMetrics(bytes);
   }
 
-
   public Boolean isAsyncPrefetch() {
     return asyncPrefetch;
   }
@@ -991,6 +990,4 @@ public class Scan extends Query {
     this.asyncPrefetch = asyncPrefetch;
     return this;
   }
-
-
 }
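
A sketch of opting into that essential-column-family optimization, assuming
Scan#setLoadColumnFamiliesOnDemand; family names follow the javadoc example:

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.util.Bytes;

  public class OnDemandCfExample {
    static Scan onDemandScan() {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cat_videos"));
      scan.addFamily(Bytes.toBytes("video"));
      // Non-essential families are loaded lazily, with the consistency
      // caveats described in the javadoc above.
      scan.setLoadColumnFamiliesOnDemand(true);
      return scan;
    }
  }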

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index 5421e57..594a459 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -64,16 +64,17 @@ import com.google.protobuf.Message;
  * <p>
  * This will serve as the client side handler for invoking the aggregate
  * functions.
- * <ul>
  * For all aggregate functions,
- * <li>start row < end row is an essential condition (if they are not
+ * <ul>
+ * <li>start row &lt; end row is an essential condition (if they are not
  * {@link HConstants#EMPTY_BYTE_ARRAY})
  * <li>Column family can't be null. In case where multiple families are
  * provided, an IOException will be thrown. An optional column qualifier can
- * also be defined.
+ * also be defined.</li>
  * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
  * parameter type. For average and std, it returns a double value. For row
- * count, it returns a long value.
+ * count, it returns a long value.</li>
+ * </ul>
  * <p>Call {@link #close()} when done.
  */
 @InterfaceAudience.Private
@@ -109,10 +110,10 @@ public class AggregationClient implements Closeable {
    * @param tableName
    * @param ci
    * @param scan
-   * @return max val <R>
+   * @return max val &lt;R&gt;
    * @throws Throwable
    *           The caller is supposed to handle the exception as they are thrown
-   *           & propagated to it.
+   *           &amp; propagated to it.
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> R max(
       final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
@@ -129,10 +130,10 @@ public class AggregationClient implements Closeable {
    * @param table
    * @param ci
    * @param scan
-   * @return max val <R>
+   * @return max val &lt;R&gt;
    * @throws Throwable
    *           The caller is supposed to handle the exception as they are thrown
-   *           & propagated to it.
+   *           &amp; propagated to it.
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> 
   R max(final Table table, final ColumnInterpreter<R, S, P, Q, T> ci,
@@ -199,7 +200,7 @@ public class AggregationClient implements Closeable {
    * @param tableName
    * @param ci
    * @param scan
-   * @return min val <R>
+   * @return min val &lt;R&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> R min(
@@ -217,7 +218,7 @@ public class AggregationClient implements Closeable {
    * @param table
    * @param ci
    * @param scan
-   * @return min val <R>
+   * @return min val &lt;R&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> 
@@ -269,11 +270,11 @@ public class AggregationClient implements Closeable {
    * optimised the operation. In case qualifier is provided, I can't use the
    * filter as it may set the flag to skip to next row, but the value read is
    * not of the given filter: in this case, this particular row will not be
-   * counted ==> an error.
+   * counted ==&gt; an error.
    * @param tableName
    * @param ci
    * @param scan
-   * @return <R, S>
+   * @return &lt;R, S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(
@@ -290,11 +291,11 @@ public class AggregationClient implements Closeable {
    * optimised the operation. In case qualifier is provided, I can't use the
    * filter as it may set the flag to skip to next row, but the value read is
    * not of the given filter: in this case, this particular row will not be
-   * counted ==> an error.
+   * counted ==&gt; an error.
    * @param table
    * @param ci
    * @param scan
-   * @return <R, S>
+   * @return &lt;R, S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> 
@@ -341,7 +342,7 @@ public class AggregationClient implements Closeable {
    * @param tableName
    * @param ci
    * @param scan
-   * @return sum <S>
+   * @return sum &lt;S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> S sum(
@@ -358,7 +359,7 @@ public class AggregationClient implements Closeable {
    * @param table
    * @param ci
    * @param scan
-   * @return sum <S>
+   * @return sum &lt;S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> 
@@ -485,7 +486,7 @@ public class AggregationClient implements Closeable {
    * @param tableName
    * @param ci
    * @param scan
-   * @return <R, S>
+   * @return &lt;R, S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message>
@@ -504,7 +505,7 @@ public class AggregationClient implements Closeable {
    * @param table
    * @param ci
    * @param scan
-   * @return <R, S>
+   * @return &lt;R, S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> double avg(
@@ -593,7 +594,7 @@ public class AggregationClient implements Closeable {
    * @param tableName
    * @param ci
    * @param scan
-   * @return <R, S>
+   * @return &lt;R, S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message>
@@ -613,7 +614,7 @@ public class AggregationClient implements Closeable {
    * @param table
    * @param ci
    * @param scan
-   * @return <R, S>
+   * @return &lt;R, S&gt;
    * @throws Throwable
    */
   public <R, S, P extends Message, Q extends Message, T extends Message> double std(
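
A rowCount sketch matching the contract above (single column family, start
row &lt; end row, no qualifier so every row is counted); it assumes the table
has the AggregateImplementation coprocessor loaded, and "t"/"cf" are
hypothetical names:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
  import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RowCountExample {
    static long countRows(Configuration conf) throws Throwable {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf"));
      AggregationClient ac = new AggregationClient(conf);
      try {
        return ac.rowCount(TableName.valueOf("t"), new LongColumnInterpreter(), scan);
      } finally {
        ac.close();
      }
    }
  }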

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
index ecf4595..cf28c91 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
@@ -203,14 +203,9 @@ public class MyLittleHBaseClient {
   <li><a href="http://hbase.org">HBase Home Page</a>
   <li><a href="http://hadoop.apache.org/">Hadoop Home Page</a>
 </ul>
-</pre></code>
-</div>
-
   <p>See also the section in the HBase Reference Guide where it discusses
   <a href="http://hbase.apache.org/book.html#client">HBase Client</a>.  It
   has section on how to access HBase from inside your multithreaded environment
   how to control resources consumed client-side, etc.</p>
-</body>
-</html>
 */
 package org.apache.hadoop.hbase.client;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
index 43efb66..e247c08 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
@@ -35,23 +35,23 @@ import com.google.protobuf.Message;
  * for an example.
  * <p>
  * Takes two generic parameters and three Message parameters. 
- * The cell value type of the interpreter is <T>.
+ * The cell value type of the interpreter is &lt;T&gt;.
  * During some computations like sum, average, the return type can be different
  * than the cell value data type: for example, a sum of int cell values might
  * overflow an int result, so we should use Long instead. Therefore, this
  * class mandates to use a different (promoted) data type for result of these
- * computations <S>. All computations are performed on the promoted data type
- * <S>. There is a conversion method
- * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and
- * returns a <S> type.
- * The AggregateImplementation uses PB messages to initialize the
+ * computations &lt;S&gt;. All computations are performed on the promoted data type
+ * &lt;S&gt;. There is a conversion method
+ * {@link ColumnInterpreter#castToReturnType(Object)} which takes a &lt;T&gt; type and
+ * returns a &lt;S&gt; type.
+ * The AggregateImplementation uses PB messages to initialize the
  * user's ColumnInterpreter implementation, and for sending the responses
  * back to AggregationClient.
- * @param <T> Cell value data type
- * @param <S> Promoted data type
- * @param <P> PB message that is used to transport initializer specific bytes
- * @param <Q> PB message that is used to transport Cell (<T>) instance
- * @param <R> PB message that is used to transport Promoted (<S>) instance
+ * @param T Cell value data type
+ * @param S Promoted data type
+ * @param P PB message that is used to transport initializer specific bytes
+ * @param Q PB message that is used to transport Cell (&lt;T&gt;) instance
+ * @param R PB message that is used to transport Promoted (&lt;S&gt;) instance
  */
 @InterfaceAudience.Private
 public abstract class ColumnInterpreter<T, S, P extends Message, 
@@ -109,14 +109,15 @@ Q extends Message, R extends Message> {
    * This takes care if either of arguments are null. returns 0 if they are
    * equal or both are null;
    * <ul>
-   * <li>>0 if l1 > l2 or l1 is not null and l2 is null.
-   * <li>< 0 if l1 < l2 or l1 is null and l2 is not null.
+   * <li>&gt; 0 if l1 &gt; l2 or l1 is not null and l2 is null.</li>
+   * <li>&lt; 0 if l1 &lt; l2 or l1 is null and l2 is not null.</li>
+   * </ul>
    */
   public abstract int compare(final T l1, final T l2);
 
   /**
-   * used for computing average of <S> data values. Not providing the divide
-   * method that takes two <S> values as it is not needed as of now.
+   * used for computing average of &lt;S&gt; data values. Not providing the divide
+   * method that takes two &lt;S&gt; values as it is not needed as of now.
    * @param o
    * @param l
    * @return Average

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
index e79a4d5..ecead8c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
@@ -32,15 +32,16 @@ import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
+ * <p>
  * This filter is used to filter based on the column family. It takes an
  * operator (equal, greater, not equal, etc) and a byte [] comparator for the
  * column family portion of a key.
- * <p/>
+ * </p><p>
  * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and {@link org.apache.hadoop.hbase.filter.SkipFilter}
  * to add more control.
- * <p/>
+ * </p><p>
  * Multiple filters can be combined using {@link org.apache.hadoop.hbase.filter.FilterList}.
- * <p/>
+ * </p>
  * If an already known column family is looked for, use {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])}
  * directly rather than a filter.
  */
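
A minimal sketch of the filter as documented, plus the recommended shortcut
for an already-known family; the family name is hypothetical:

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.BinaryComparator;
  import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
  import org.apache.hadoop.hbase.filter.FamilyFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class FamilyFilterExample {
    static Scan familyScan() {
      Scan scan = new Scan();
      scan.setFilter(new FamilyFilter(CompareOp.EQUAL,
          new BinaryComparator(Bytes.toBytes("cf"))));
      // For a known family, Scan#addFamily / Get#addFamily is preferred.
      return scan;
    }
  }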

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
index a2f9015..f7598d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
@@ -261,7 +261,6 @@ public abstract class Filter {
    * @param pbBytes A pb serialized {@link Filter} instance
    * @return An instance of {@link Filter} made from <code>bytes</code>
    * @throws DeserializationException
-   * @throws IOException in case an I/O or an filter specific failure needs to be signaled.
    * @see #toByteArray
    */
   public static Filter parseFrom(final byte [] pbBytes) throws DeserializationException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
index 1bcd00a..08a6821 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
@@ -41,7 +41,7 @@ public abstract class FilterBase extends Filter {
    * Filters that are purely stateless and do nothing in their reset() methods can inherit
    * this null/empty implementation.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   @Override
   public void reset() throws IOException {
@@ -51,7 +51,7 @@ public abstract class FilterBase extends Filter {
    * Filters that do not filter by row key can inherit this implementation that
    * never filters anything. (ie: returns false).
    *
-   * @inheritDoc
+   * {@inheritDoc}
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
    *             Instead use {@link #filterRowKey(Cell)}
    */
@@ -72,7 +72,7 @@ public abstract class FilterBase extends Filter {
    * Filters that never filter all remaining can inherit this implementation that
    * never stops the filter early.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   @Override
   public boolean filterAllRemaining() throws IOException {
@@ -82,7 +82,7 @@ public abstract class FilterBase extends Filter {
   /**
    * By default no transformation takes place
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   @Override
   public Cell transformCell(Cell v) throws IOException {
@@ -93,7 +93,7 @@ public abstract class FilterBase extends Filter {
    * Filters that never filter by modifying the returned List of Cells can
    * inherit this implementation that does nothing.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   @Override
   public void filterRowCells(List<Cell> ignored) throws IOException {
@@ -103,7 +103,7 @@ public abstract class FilterBase extends Filter {
    * Filters that never filter by modifying the returned List of Cells can
    * inherit this implementation that does nothing.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   @Override
   public boolean hasFilterRow() {
@@ -115,7 +115,7 @@ public abstract class FilterBase extends Filter {
    * {@link #filterKeyValue(Cell)} can inherit this implementation that
    * never filters a row.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   @Override
   public boolean filterRow() throws IOException {
@@ -126,7 +126,7 @@ public abstract class FilterBase extends Filter {
    * Filters that are not sure which key must be next seeked to, can inherit
    * this implementation that, by default, returns a null Cell.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   public Cell getNextCellHint(Cell currentCell) throws IOException {
     return null;
@@ -136,7 +136,7 @@ public abstract class FilterBase extends Filter {
    * By default, we require all scan's column families to be present. Our
    * subclasses may be more precise.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   public boolean isFamilyEssential(byte[] name) throws IOException {
     return true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
index 2f89251..8ba1ccb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
@@ -41,14 +41,14 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * Since you can use Filter Lists as children of Filter Lists, you can create a
  * hierarchy of filters to be evaluated.
  *
- * <br/>
+ * <br>
  * {@link Operator#MUST_PASS_ALL} evaluates lazily: evaluation stops as soon as one filter does
  * not include the KeyValue.
  *
- * <br/>
+ * <br>
  * {@link Operator#MUST_PASS_ONE} evaluates non-lazily: all filters are always evaluated.
  *
- * <br/>
+ * <br>
  * Defaults to {@link Operator#MUST_PASS_ALL}.
  */
 @InterfaceAudience.Public
@@ -315,7 +315,7 @@ final public class FilterList extends Filter {
    * Filters that never filter by modifying the returned List of Cells can
    * inherit this implementation that does nothing.
    *
-   * @inheritDoc
+   * {@inheritDoc}
    */
   @Override
   public void filterRowCells(List<Cell> cells) throws IOException {
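
A sketch combining filters under the lazily-evaluated MUST_PASS_ALL operator
described above; the page size and comparator value are hypothetical:

  import org.apache.hadoop.hbase.filter.BinaryComparator;
  import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
  import org.apache.hadoop.hbase.filter.FilterList;
  import org.apache.hadoop.hbase.filter.PageFilter;
  import org.apache.hadoop.hbase.filter.ValueFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class FilterListExample {
    static FilterList nonZeroFirstPage() {
      FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL);
      list.addFilter(new ValueFilter(CompareOp.NOT_EQUAL,
          new BinaryComparator(Bytes.toBytes(0))));
      // Evaluation stops as soon as one filter excludes the KeyValue.
      list.addFilter(new PageFilter(25));
      return list;
    }
  }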

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
index e2f159b..c041914 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
@@ -44,7 +44,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * phoenix etc. However, both solutions are inefficient. Both of them can't utilize the range info
  * to perform fast forwarding during scan which is quite time consuming. If the number of ranges
  * is quite big (e.g. millions), join is a proper solution though it is slow. However, there are
- * cases that user wants to specify a small number of ranges to scan (e.g. <1000 ranges). Both
+ * cases that user wants to specify a small number of ranges to scan (e.g. &lt;1000 ranges). Both
  * solutions can't provide satisfactory performance in such case. MultiRowRangeFilter is to support
  * such a use case (scan multiple row key ranges), which can construct the row key ranges from user
  * specified list and perform fast-forwarding during scan. Thus, the scan will be quite efficient.
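
A sketch of scanning a small set of ranges with this filter, assuming the
RowRange(String, boolean, String, boolean) constructor; the row keys are
hypothetical:

  import java.io.IOException;
  import java.util.Arrays;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
  import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;

  public class MultiRangeExample {
    static Scan multiRangeScan() throws IOException {
      Scan scan = new Scan();
      scan.setFilter(new MultiRowRangeFilter(Arrays.asList(
          new RowRange("row-010", true, "row-020", false),
          new RowRange("row-500", true, "row-510", false))));
      return scan;
    }
  }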

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
index 7c68dd2..adc9c54 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
@@ -31,11 +31,11 @@ import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
 /**
  * Implementation of Filter interface that limits results to a specific page
- * size. It terminates scanning once the number of filter-passed rows is >
+ * size. It terminates scanning once the number of filter-passed rows is &gt;
  * the given page size.
  * <p>
  * Note that this filter cannot guarantee that the number of results returned
- * to a client are <= page size. This is because the filter is applied
+ * to a client are &lt;= page size. This is because the filter is applied
  * separately on different region servers. It does however optimize the scan of
  * individual HRegions by making sure that the page size is never exceeded
  * locally.
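
A sketch showing the per-region nature of that limit: each region stops
after page-size rows, so the client may still see more rows in total and
enforces the final cut-off itself. The page size is hypothetical:

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.PageFilter;

  public class PageFilterExample {
    static Scan firstPage() {
      Scan scan = new Scan();
      scan.setFilter(new PageFilter(10)); // each region stops after 10 rows
      return scan;
    }
  }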

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
index 71ea3c3..3aced13 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
@@ -39,8 +39,9 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * entire row if any of its weights are zero.  In this case, we want to prevent
  * rows from being emitted if a single key is filtered.  Combine this filter
  * with a {@link ValueFilter}:
+ * </p>
  * <p>
- * <pre>
+ * <code>
  * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
  *     new BinaryComparator(Bytes.toBytes(0))));
  * </code>
@@ -48,6 +49,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * (since ValueFilter will not pass that Cell).
  * Without this filter, the other non-zero valued columns in the row would still
  * be emitted.
+ * </p>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
index 2e4d0a6..005f03c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
@@ -422,7 +422,6 @@ public class AsyncRpcClient extends AbstractRpcClient {
    * @param rpcTimeout default rpc operation timeout
    *
    * @return An RPC channel that goes via this RPC client instance.
-   * @throws IOException when channel could not be created
    */
   public RpcChannel createRpcChannel(final ServerName sn, final User user, int rpcTimeout) {
     return new RpcChannelImplementation(this, sn, user, rpcTimeout);

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java
index bbd2fc7..33fc880 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java
@@ -24,7 +24,7 @@ import java.net.InetSocketAddress;
 
 /**
  * This class holds the address and the user ticket, etc. The client connections
- * to servers are uniquely identified by <remoteAddress, ticket, serviceName>
+ * to servers are uniquely identified by &lt;remoteAddress, ticket, serviceName&gt;
  */
 @InterfaceAudience.Private
 public class ConnectionId {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
index 5511cb1..aa407f7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
@@ -35,7 +35,8 @@ import com.google.protobuf.RpcController;
  * When implementing {@link com.google.protobuf.Service} defined methods, 
  * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client:
  * <code>
- * public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done) {
+ * public void myMethod(RpcController controller, MyRequest request,
+ *     RpcCallback&lt;MyResponse&gt; done) {
  *   MyResponse response = null;
  *   try {
  *     // do processing
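
The diff cuts the example off inside the try block; a hedged sketch of how the
pattern typically completes (MyRequest/MyResponse are placeholders, and
ResponseConverter.setControllerException is the usual helper for stashing the
IOException in the controller):

    // assumes: com.google.protobuf.RpcController, RpcCallback,
    //          org.apache.hadoop.hbase.protobuf.ResponseConverter, java.io.IOException
    public void myMethod(RpcController controller, MyRequest request,
        RpcCallback<MyResponse> done) {
      MyResponse response = null;
      try {
        // do processing
        response = MyResponse.getDefaultInstance(); // placeholder result
      } catch (IOException ioe) {
        // surface the exception through the controller; it is rethrown client-side
        ResponseConverter.setControllerException(controller, ioe);
      }
      done.run(response); // always invoke the callback, even when response is null
    }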

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index e816f61..a87fd47 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -252,7 +252,7 @@ public final class ProtobufUtil {
    * to flag what follows as a protobuf in hbase.  Prepend these bytes to all content written to
    * znodes, etc.
    * @param bytes Bytes to decorate
-   * @return The passed <code>bytes</codes> with magic prepended (Creates a new
+   * @return The passed <code>bytes</code> with magic prepended (Creates a new
    * byte array that is <code>bytes.length</code> plus {@link ProtobufMagic#PB_MAGIC}.length.
    */
   public static byte [] prependPBMagic(final byte [] bytes) {
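
A short sketch of the round trip this Javadoc describes, assuming the companion
isPBMagicPrefix/lengthOfPBMagic helpers in this class (the message variable is
illustrative):

    // assumes: org.apache.hadoop.hbase.protobuf.ProtobufUtil
    byte[] content = message.toByteArray();                  // raw protobuf bytes
    byte[] znodeData = ProtobufUtil.prependPBMagic(content); // magic + content, new array
    if (ProtobufUtil.isPBMagicPrefix(znodeData)) {
      int offset = ProtobufUtil.lengthOfPBMagic();
      // parse the protobuf starting at znodeData[offset]
    }
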
@@ -2120,7 +2120,7 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Convert a ListMultimap<String, TablePermission> where key is username
+   * Convert a ListMultimap&lt;String, TablePermission&gt; where key is username
    * to a protobuf UserPermission
    *
    * @param perm the list of user and table permissions
@@ -2374,7 +2374,7 @@ public final class ProtobufUtil {
 
   /**
    * Convert a protobuf UserTablePermissions to a
-   * ListMultimap<String, TablePermission> where key is username.
+   * ListMultimap&lt;String, TablePermission&gt; where key is username.
    *
    * @param proto the protobuf UserPermission
    * @return the converted UserPermission

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 9491795..a8fec87 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -51,11 +51,11 @@ import org.apache.hadoop.hbase.util.Strings;
  * Helper class to interact with the quota table.
  * <pre>
  *     ROW-KEY      FAM/QUAL        DATA
- *   n.<namespace> q:s         <global-quotas>
- *   t.<table>     q:s         <global-quotas>
- *   u.<user>      q:s         <global-quotas>
- *   u.<user>      q:s.<table> <table-quotas>
- *   u.<user>      q:s.<ns>:   <namespace-quotas>
+ *   n.&lt;namespace&gt; q:s         &lt;global-quotas&gt;
+ *   t.&lt;table&gt;     q:s         &lt;global-quotas&gt;
+ *   u.&lt;user&gt;      q:s         &lt;global-quotas&gt;
+ *   u.&lt;user&gt;      q:s.&lt;table&gt; &lt;table-quotas&gt;
+ *   u.&lt;user&gt;      q:s.&lt;ns&gt;:   &lt;namespace-quotas&gt;
  * </pre>
  */
 @InterfaceAudience.Private
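
As a worked example of the layout above (user and table names hypothetical), a
per-user, per-table quota for user "bob" on table "mytable" would live at:

    // assumes: org.apache.hadoop.hbase.util.Bytes
    byte[] rowKey    = Bytes.toBytes("u.bob");     // user-scoped quota row
    byte[] family    = Bytes.toBytes("q");
    byte[] qualifier = Bytes.toBytes("s.mytable"); // per-table quota for this user
    // bob's global quota sits in the same row under the plain qualifier "s"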

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java
index 75c967d..50b8b15 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java
@@ -34,7 +34,7 @@ public enum BloomType {
    */
   ROW,
   /**
-   * Bloom enabled with Table row & column (family+qualifier) as Key
+   * Bloom enabled with Table row &amp; column (family+qualifier) as Key
    */
   ROWCOL
 }
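
For reference, a hedged sketch of selecting the ROWCOL variant on a column
family using the descriptor API of this era (the family name is illustrative):

    // assumes: org.apache.hadoop.hbase.HColumnDescriptor
    HColumnDescriptor cf = new HColumnDescriptor("cf");
    cf.setBloomFilterType(BloomType.ROWCOL); // Bloom key = row + family + qualifier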

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 359dbff..8e80e06 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -125,7 +125,7 @@ public interface ReplicationPeers {
    * have to be connected. The state is read directly from the backing store.
    * @param peerId a short that identifies the cluster
    * @return true if replication is enabled, false otherwise.
-   * @throws IOException Throws if there's an error contacting the store
+   * @throws ReplicationException thrown if there's an error contacting the store
    */
   boolean getStatusOfPeerFromBackingStore(String peerId) throws ReplicationException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java
index 4968548..765559f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java
@@ -24,7 +24,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 /**
  * This contains a visibility expression which can be associated with a cell. When it is set with a
  * Mutation, all the cells in that mutation will get associated with this expression. A visibility
- * expression can contain visibility labels combined with logical operators AND(&), OR(|) and NOT(!)
+ * expression can contain visibility labels combined with logical
+ * operators AND(&amp;), OR(|) and NOT(!)
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -51,7 +52,7 @@ public class CellVisibility {
   /**
    * Helps in quoting authentication Strings. Use this if unicode characters are to
    * be used in the expression, or special characters like '(', ')',
-   * '"','\','&','|','!'
+   * '"','\','&amp;','|','!'
    */
   public static String quote(String auth) {
     return quote(Bytes.toBytes(auth));
@@ -60,7 +61,7 @@ public class CellVisibility {
   /**
    * Helps in quoting authentication Strings. Use this if unicode characters are to
    * be used in the expression, or special characters like '(', ')',
-   * '"','\','&','|','!'
+   * '"','\','&amp;','|','!'
    */
   public static String quote(byte[] auth) {
     int escapeChars = 0;
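
A brief sketch tying the pieces above together (labels and row/column names are
illustrative):

    // assumes: org.apache.hadoop.hbase.client.Put, org.apache.hadoop.hbase.util.Bytes
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    put.setCellVisibility(new CellVisibility("(SECRET & ADMIN) | PUBLIC"));
    // Labels containing special characters should be quoted first:
    String label = CellVisibility.quote("acme|ops");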

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index 5cbb066..e71886a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -1206,8 +1206,6 @@ public class ZKUtil {
    * @param data data of node to create
    * @param cb
    * @param ctx
-   * @throws KeeperException if unexpected zookeeper exception
-   * @throws KeeperException.NodeExistsException if node already exists
    */
   public static void asyncCreate(ZooKeeperWatcher zkw,
       String znode, byte [] data, final AsyncCallback.StringCallback cb,
@@ -2027,7 +2025,6 @@ public class ZKUtil {
   /**
    * Recursively print the current state of ZK (non-transactional)
    * @param root name of the root directory in zk to print
-   * @throws KeeperException
    */
   public static void logZKTree(ZooKeeperWatcher zkw, String root) {
     if (!LOG.isDebugEnabled()) return;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index d26874a..475e385 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -656,7 +656,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   /**
    * Close the connection to ZooKeeper.
    *
-   * @throws InterruptedException
    */
   @Override
   public void close() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
index 8f299cc..46ebeeb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
@@ -23,7 +23,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 
 /**
- * The unit of storage in HBase consisting of the following fields:<br/>
+ * The unit of storage in HBase consisting of the following fields:
+ * <br>
  * <pre>
  * 1) row
  * 2) column family
@@ -33,30 +34,36 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * 6) MVCC version
  * 7) value
  * </pre>
- * <p/>
+ * <p>
  * Uniqueness is determined by the combination of row, column family, column qualifier,
  * timestamp, and type.
- * <p/>
+ * </p>
+ * <p>
  * The natural comparator will perform a bitwise comparison on row, column family, and column
  * qualifier. Less intuitively, it will then treat the greater timestamp as the lesser value with
  * the goal of sorting newer cells first.
- * <p/>
+ * </p>
+ * <p>
  * This interface should not include methods that allocate new byte[]'s such as those used in client
  * or debugging code. These users should use the methods found in the {@link CellUtil} class.
  * Currently, to minimize the impact on existing applications moving between 0.94 and 0.96, we
  * include the costly helper methods marked as deprecated.   
- * <p/>
- * Cell implements Comparable<Cell> which is only meaningful when comparing to other keys in the
+ * </p>
+ * <p>
+ * Cell implements Comparable&lt;Cell&gt; which is only meaningful when
+ * comparing to other keys in the
  * same table. It uses CellComparator which does not work on the -ROOT- and hbase:meta tables.
- * <p/>
+ * </p>
+ * <p>
  * In the future, we may consider adding a boolean isOnHeap() method and a getValueBuffer() method
  * that can be used to pass a value directly from an off-heap ByteBuffer to the network without
  * copying into an on-heap byte[].
- * <p/>
+ * </p>
+ * <p>
  * Historic note: the original Cell implementation (KeyValue) requires that all fields be encoded as
  * consecutive bytes in the same byte[], whereas this interface allows fields to reside in separate
  * byte[]'s.
- * <p/>
+ * </p>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -77,7 +84,7 @@ public interface Cell {
   int getRowOffset();
 
   /**
-   * @return Number of row bytes. Must be < rowArray.length - offset.
+   * @return Number of row bytes. Must be &lt; rowArray.length - offset.
    */
   short getRowLength();
 
@@ -97,7 +104,7 @@ public interface Cell {
   int getFamilyOffset();
 
   /**
-   * @return Number of family bytes.  Must be < familyArray.length - offset.
+   * @return Number of family bytes.  Must be &lt; familyArray.length - offset.
    */
   byte getFamilyLength();
 
@@ -117,7 +124,7 @@ public interface Cell {
   int getQualifierOffset();
 
   /**
-   * @return Number of qualifier bytes.  Must be < qualifierArray.length - offset.
+   * @return Number of qualifier bytes.  Must be &lt; qualifierArray.length - offset.
    */
   int getQualifierLength();
 
@@ -148,7 +155,7 @@ public interface Cell {
    * cells in the memstore but is not retained forever. It may survive several flushes, but
    * generally becomes irrelevant after the cell's row is no longer involved in any operations that
    * require strict consistency.
-   * @return mvccVersion (always >= 0 if exists), or 0 if it no longer exists
+   * @return mvccVersion (always &gt;= 0 if exists), or 0 if it no longer exists
    */
   @Deprecated
   long getMvccVersion();
@@ -158,7 +165,7 @@ public interface Cell {
    * exists for cells in the memstore but is not retained forever. It will be kept for
    * {@link HConstants#KEEP_SEQID_PERIOD} days, but generally becomes irrelevant after the cell's
    * row is no longer involved in any operations that require strict consistency.
-   * @return seqId (always > 0 if exists), or 0 if it no longer exists
+   * @return seqId (always &gt; 0 if exists), or 0 if it no longer exists
    */
   long getSequenceId();
 
@@ -177,7 +184,7 @@ public interface Cell {
   int getValueOffset();
 
   /**
-   * @return Number of value bytes.  Must be < valueArray.length - offset.
+   * @return Number of value bytes.  Must be &lt; valueArray.length - offset.
    */
   int getValueLength();
   
@@ -199,7 +206,7 @@ public interface Cell {
   /**
    * WARNING do not use, expensive.  This gets an arraycopy of the cell's value.
    *
-   * Added to ease transition from  0.94 -> 0.96.
+   * Added to ease transition from  0.94 -&gt; 0.96.
    * 
    * @deprecated as of 0.96, use {@link CellUtil#cloneValue(Cell)}
    */
@@ -209,7 +216,7 @@ public interface Cell {
   /**
    * WARNING do not use, expensive.  This gets an arraycopy of the cell's family. 
    *
-   * Added to ease transition from  0.94 -> 0.96.
+   * Added to ease transition from  0.94 -&gt; 0.96.
    * 
    * @deprecated as of 0.96, use {@link CellUtil#cloneFamily(Cell)}
    */
@@ -219,7 +226,7 @@ public interface Cell {
   /**
    * WARNING do not use, expensive.  This gets an arraycopy of the cell's qualifier.
    *
-   * Added to ease transition from  0.94 -> 0.96.
+   * Added to ease transition from  0.94 -&gt; 0.96.
    * 
    * @deprecated as of 0.96, use {@link CellUtil#cloneQualifier(Cell)}
    */
@@ -229,10 +236,10 @@ public interface Cell {
   /**
    * WARNING do not use, expensive.  this gets an arraycopy of the cell's row.
    *
-   * Added to ease transition from  0.94 -> 0.96.
+   * Added to ease transition from  0.94 -&gt; 0.96.
    * 
    * @deprecated as of 0.96, use {@link CellUtil#getRowByte(Cell, int)}
    */
   @Deprecated
   byte[] getRow();
-}
\ No newline at end of file
+}
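
To illustrate the allocation-free access pattern this Javadoc mandates (cell is
any Cell instance; the String conversion is for display only):

    // assumes: org.apache.hadoop.hbase.Cell, CellUtil, org.apache.hadoop.hbase.util.Bytes
    byte[] arr = cell.getRowArray();
    int off = cell.getRowOffset();
    short len = cell.getRowLength(); // always < rowArray.length - offset
    String row = Bytes.toString(arr, off, len);
    // Convenience form (allocates a copy; client/debug code only):
    byte[] rowCopy = CellUtil.cloneRow(cell);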

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index e7ccbde..2d0c940 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -111,7 +111,7 @@ public class CellComparator implements Comparator<Cell>, Serializable {
    * @param b
    * @param ignoreSequenceid True if we are to compare the key portion only and ignore
    * the sequenceid. Set to false to compare key and consider sequenceid.
-   * @return 0 if equal, -1 if a < b, and +1 if a > b.
+   * @return 0 if equal, -1 if a &lt; b, and +1 if a &gt; b.
    */
   private final int compare(final Cell a, final Cell b, boolean ignoreSequenceid) {
     // row
@@ -682,4 +682,4 @@ public class CellComparator implements Comparator<Cell>, Serializable {
       return result;
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java
index 79e677e..0adb7b5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 /**
  * Implementer can return a CellScanner over its Cell content.
  * Class name is ugly but it mimics java.util.Iterable, only we are about the dumber
- * CellScanner rather than say Iterator<Cell>.  See CellScanner class comment for why we go
+ * CellScanner rather than say Iterator&lt;Cell&gt;.  See CellScanner class comment for why we go
  * dumber than java.util.Iterator.
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
index f337122..b0460b7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * may or may not point to a reusable cell implementation, so users of the CellScanner should not,
  * for example, accumulate a List of Cells. All of the references may point to the same object,
  * which would be the latest state of the underlying Cell. In short, the Cell is mutable.
- * <p/>
+ * </p>
  * Typical usage:
  *
  * <pre>
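
The diff ends just before the usage block; a hedged sketch of what typical usage
looks like (the scanner source is illustrative, e.g. a client Result, and the
IOException declared by advance() is elided):

    // assumes: org.apache.hadoop.hbase.Cell, CellScanner
    CellScanner cellScanner = result.cellScanner();
    while (cellScanner.advance()) { // must advance before the first current()
      Cell cell = cellScanner.current();
      // ... use cell immediately; do not retain it, the reference may be reused
    }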

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 674ef6e..89a3f34 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1133,7 +1133,8 @@ public final class HConstants {
 
   /**
    * When using bucket cache, this is a float that EITHER represents a percentage of total heap
-   * memory size to give to the cache (if < 1.0) OR, it is the capacity in megabytes of the cache.
+   * memory size to give to the cache (if &lt; 1.0) OR, it is the capacity in
+   * megabytes of the cache.
    */
   public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size";
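
A hedged illustration of the dual interpretation described above (this is not
the actual cache-config code; the heap-size sourcing is simplified):

    // assumes: org.apache.hadoop.conf.Configuration conf
    float size = conf.getFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
    long maxHeap = Runtime.getRuntime().maxMemory();
    long capacityBytes = (size < 1.0F)
        ? (long) (maxHeap * size)      // fraction of total heap memory
        : (long) size * 1024L * 1024L; // absolute capacity in megabytes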
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 315e9a3..cd17bef 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -64,18 +64,21 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>
  * KeyValue wraps a byte array and takes offsets and lengths into the passed array indicating where to start
  * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
- * <code>&lt;keylength> &lt;valuelength> &lt;key> &lt;value></code> Key is further decomposed as:
- * <code>&lt;rowlength> &lt;row> &lt;columnfamilylength> &lt;columnfamily> &lt;columnqualifier>
- * &lt;timestamp> &lt;keytype></code>
+ * <code>&lt;keylength&gt; &lt;valuelength&gt; &lt;key&gt; &lt;value&gt;</code>
+ * Key is further decomposed as:
+ * <code>&lt;rowlength&gt; &lt;row&gt; &lt;columnfamilylength&gt;
+ * &lt;columnfamily&gt; &lt;columnqualifier&gt;
+ * &lt;timestamp&gt; &lt;keytype&gt;</code>
  * The <code>rowlength</code> maximum is <code>Short.MAX_VALUE</code>, column family length maximum
- * is <code>Byte.MAX_SIZE</code>, and column qualifier + key length must be <
+ * is <code>Byte.MAX_VALUE</code>, and column qualifier + key length must be &lt;
  * <code>Integer.MAX_VALUE</code>. The column does not contain the family/qualifier delimiter,
  * {@link #COLUMN_FAMILY_DELIMITER}<br>
  * KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after
- * the value part. The format for this part is: <code>&lt;tagslength>&lt;tagsbytes></code>.
+ * the value part. The format for this part is: <code>&lt;tagslength&gt;&lt;tagsbytes&gt;</code>.
  * <code>tagslength</code> maximum is <code>Short.MAX_VALUE</code>. The <code>tagsbytes</code>
  * contain one or more tags where as each tag is of the form
- * <code>&lt;taglength>&lt;tagtype>&lt;tagbytes></code>.  <code>tagtype</code> is one byte and
+ * <code>&lt;taglength&gt;&lt;tagtype&gt;&lt;tagbytes&gt;</code>.
+ * <code>tagtype</code> is one byte and
  * <code>taglength</code> maximum is <code>Short.MAX_VALUE</code> and it includes 1 byte type length
  * and actual tag bytes length.
  */
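
A small sketch of building a KeyValue whose serialized form follows the layout
above (all values illustrative):

    // assumes: org.apache.hadoop.hbase.KeyValue, org.apache.hadoop.hbase.util.Bytes
    KeyValue kv = new KeyValue(
        Bytes.toBytes("row1"),   // <row>, rowlength <= Short.MAX_VALUE
        Bytes.toBytes("cf"),     // <columnfamily>
        Bytes.toBytes("qual"),   // <columnqualifier>
        1234567890L,             // <timestamp>
        Bytes.toBytes("value")); // <value>
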
@@ -1163,7 +1166,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
    * as JSON. Values are left out due to their tendency to be large. If needed,
    * they can be added manually.
    *
-   * @return the Map<String,?> containing data from this key
+   * @return the Map&lt;String,?&gt; containing data from this key
    */
   public Map<String, Object> toStringMap() {
     Map<String, Object> stringMap = new HashMap<String, Object>();
@@ -1878,7 +1881,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
     * Compares only the user specified portion of a Key.  This is overridden by MetaComparator.
      * @param left
      * @param right
-     * @return 0 if equal, <0 if left smaller, >0 if right smaller
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     protected int compareRowKey(final Cell left, final Cell right) {
       return CellComparator.COMPARATOR.compareRows(left, right);
@@ -1893,7 +1896,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
      * @param right
      * @param roffset
      * @param rlength
-     * @return  0 if equal, <0 if left smaller, >0 if right smaller
+     * @return  0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     public int compareFlatKey(byte[] left, int loffset, int llength,
         byte[] right, int roffset, int rlength) {
@@ -2005,7 +2008,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
      * @param right
      * @param roffset
      * @param rlength
-     * @return 0 if equal, <0 if left smaller, >0 if right smaller
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     public int compareRows(byte [] left, int loffset, int llength,
         byte [] right, int roffset, int rlength) {
@@ -2054,7 +2057,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
      * @param right
      * @param roffset
      * @param rlength
-     * @return 0 if equal, <0 if left smaller, >0 if right smaller
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      */
     @Override // SamePrefixComparator
     public int compareIgnoringPrefix(int commonPrefix, byte[] left,
@@ -2292,7 +2295,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
      * This is a HFile block index key optimization.
      * @param leftKey
      * @param rightKey
-     * @return 0 if equal, <0 if left smaller, >0 if right smaller
+     * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller
      * @deprecated Since 0.99.2;
      */
     @Deprecated

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index 5035666..407c017 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -526,7 +526,7 @@ public class KeyValueUtil {
   /*************** misc **********************************/
   /**
    * @param cell
-   * @return <code>cell<code> if it is an instance of {@link KeyValue} else we will return a
+   * @return <code>cell</code> if it is an instance of {@link KeyValue} else we will return a
    * new {@link KeyValue} instance made from <code>cell</code>
    * @deprecated without any replacement.
    */