Posted to commits@hbase.apache.org by bu...@apache.org on 2015/06/14 02:45:20 UTC

[2/3] hbase git commit: HBASE-13569 Correct Javadoc (for Java8)

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
index f6f89b4..ad14f67 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -41,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * servers on same hostname and port (startcode is usually timestamp of server startup). The
  * {@link #toString()} format of ServerName is safe to use in the  filesystem and as znode name
  * up in ZooKeeper.  Its format is:
- * <code>&lt;hostname> '{@link #SERVERNAME_SEPARATOR}' &lt;port> '{@link #SERVERNAME_SEPARATOR}' &lt;startcode></code>.
+ * <code>&lt;hostname&gt; '{@link #SERVERNAME_SEPARATOR}' &lt;port&gt;
+ * '{@link #SERVERNAME_SEPARATOR}' &lt;startcode&gt;</code>.
  * For example, if hostname is <code>www.example.org</code>, port is <code>1234</code>,
  * and the startcode for the regionserver is <code>1212121212</code>, then
  * the {@link #toString()} would be <code>www.example.org,1234,1212121212</code>.
@@ -224,7 +225,7 @@ public class ServerName implements Comparable<ServerName>, Serializable {
    * @param port
    * @param startcode
    * @return Server name made of the concatenation of hostname, port and
-   * startcode formatted as <code>&lt;hostname> ',' &lt;port> ',' &lt;startcode></code>
+   * startcode formatted as <code>&lt;hostname&gt; ',' &lt;port&gt; ',' &lt;startcode&gt;</code>
    */
   static String getServerName(String hostName, int port, long startcode) {
     final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13);
@@ -237,10 +238,10 @@ public class ServerName implements Comparable<ServerName>, Serializable {
   }
 
   /**
-   * @param hostAndPort String in form of &lt;hostname> ':' &lt;port>
+   * @param hostAndPort String in form of &lt;hostname&gt; ':' &lt;port&gt;
    * @param startcode
    * @return Server name made of the concatenation of hostname, port and
-   * startcode formatted as <code>&lt;hostname> ',' &lt;port> ',' &lt;startcode></code>
+   * startcode formatted as <code>&lt;hostname&gt; ',' &lt;port&gt; ',' &lt;startcode&gt;</code>
    */
   public static String getServerName(final String hostAndPort,
       final long startcode) {
@@ -339,7 +340,7 @@ public class ServerName implements Comparable<ServerName>, Serializable {
 
   /**
    * @param str Either an instance of {@link ServerName#toString()} or a
-   * "'<hostname>' ':' '<port>'".
+   * "'&lt;hostname&gt;' ':' '&lt;port&gt;'".
    * @return A ServerName instance.
    */
   public static ServerName parseServerName(final String str) {
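
The format fixed above is <hostname> ',' <port> ',' <startcode>. A rough sketch of
what that format implies (a hypothetical standalone helper, not ServerName's
actual methods):

    // Hypothetical sketch: compose and split the documented format.
    static String compose(String host, int port, long startcode) {
      return host + "," + port + "," + startcode;  // www.example.org,1234,1212121212
    }
    static String[] split(String serverName) {
      return serverName.split(",");                // [hostname, port, startcode]
    }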

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index 0781e1c..63066b3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -126,8 +126,8 @@ public final class TableName implements Comparable<TableName> {
    * The name may not start with '.' or '-'.
    *
    * Valid fully qualified table names:
-   * foo:bar, namespace=>foo, table=>bar
-   * org:foo.bar, namespace=org, table=>foo.bar
+   * foo:bar, namespace=&gt;foo, table=&gt;bar
+   * org:foo.bar, namespace=org, table=&gt;foo.bar
    */
   public static byte [] isLegalFullyQualifiedTableName(final byte[] tableName) {
     if (tableName == null || tableName.length <= 0) {
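
For reference, the delimiter in the fully qualified names above is ':'. A minimal
sketch of splitting such a name (illustrative only; TableName has its own parsing,
and the fallback to a default namespace is an assumption here):

    String fullName = "org:foo.bar";
    int sep = fullName.indexOf(':');
    String namespace = sep < 0 ? "default" : fullName.substring(0, sep);  // "org"
    String qualifier = sep < 0 ? fullName : fullName.substring(sep + 1);  // "foo.bar"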

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 34f1bf7..c6406f2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 /**
  * Accepts a stream of Cells. This can be used to build a block of cells during compactions
  * and flushes, or to build a byte[] to send to the client. This could be backed by a
- * List<KeyValue>, but more efficient implementations will append results to a
+ * List&lt;KeyValue&gt;, but more efficient implementations will append results to a
  * byte[] to eliminate overhead, and possibly encode the cells further.
  * <p>To read Cells, use {@link org.apache.hadoop.hbase.CellScanner}
  * @see org.apache.hadoop.hbase.CellScanner

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
index 8352e4e..ad1c984 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 /**
  * Represents an interval of version timestamps.
  * <p>
- * Evaluated according to minStamp <= timestamp < maxStamp
+ * Evaluated according to minStamp &lt;= timestamp &lt; maxStamp
  * or [minStamp,maxStamp) in interval notation.
  * <p>
  * Only used internally; should not be accessed directly by clients.
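
A one-line illustration of the half-open interval documented above (a sketch, not
TimeRange's actual method):

    // [minStamp, maxStamp): inclusive lower bound, exclusive upper bound.
    static boolean withinRange(long minStamp, long maxStamp, long ts) {
      return ts >= minStamp && ts < maxStamp;
    }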

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
index 62167d6..2d58a18 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * on the local filesystem. It is configured with a URI passed in as a String
  * to init(). The URI should have the form:
  * <p>
- * <pre>    scheme://path?option1=value1&option2=value2</pre>
+ * <pre>    scheme://path?option1=value1&amp;option2=value2</pre>
  * <p>
  * <i>scheme</i> can be either "jks" or "jceks", specifying the file based
  * providers shipped with every JRE. The latter is the certificate store for
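
The corrected URI form can be exercised with java.net.URI directly. A sketch (the
"password" option here is illustrative, not a statement of the provider's
supported options):

    java.net.URI uri = java.net.URI.create(
        "jceks:///var/hbase/hbase.jceks?password=changeit");
    String scheme = uri.getScheme();  // "jceks"
    String path = uri.getPath();      // "/var/hbase/hbase.jceks"
    java.util.Map<String, String> opts = new java.util.HashMap<String, String>();
    for (String kv : uri.getQuery().split("&")) {
      String[] pair = kv.split("=", 2);
      opts.put(pair[0], pair[1]);     // option -> value
    }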

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index e1da695..1bef221 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -61,7 +61,7 @@ public class ThrottledInputStream extends InputStream {
     rawStream.close();
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read() throws IOException {
     throttle();
@@ -72,7 +72,7 @@ public class ThrottledInputStream extends InputStream {
     return data;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read(byte[] b) throws IOException {
     throttle();
@@ -83,7 +83,7 @@ public class ThrottledInputStream extends InputStream {
     return readLen;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
     throttle();
@@ -159,7 +159,7 @@ public class ThrottledInputStream extends InputStream {
     return totalSleepTime;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public String toString() {
     return "ThrottledInputStream{" +

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
index 9ca0964..4a3d42f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
@@ -45,7 +45,7 @@ public interface Dictionary {
    * 
    * @param data the byte array that we're looking up
    * @param offset Offset into <code>data</code> to add to Dictionary.
-   * @param length Length beyond <code>offset</code> that comprises entry; must be > 0.
+   * @param length Length beyond <code>offset</code> that comprises entry; must be &gt; 0.
    * @return the index of the entry, or {@link #NOT_IN_DICTIONARY} if not found
    */
   short findEntry(byte[] data, int offset, int length);
@@ -59,7 +59,7 @@ public interface Dictionary {
    * 
    * @param data the entry to add
    * @param offset Offset into <code>data</code> to add to Dictionary.
-   * @param length Length beyond <code>offset</code> that comprises entry; must be > 0.
+   * @param length Length beyond <code>offset</code> that comprises entry; must be &gt; 0.
    * @return the index of the entry
    */
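
Taken together, the two methods above form the usual lookup-or-add pattern. A
sketch using the interface's own names (addEntry is assumed as the name of the
adding method whose Javadoc is shown):

    // length must be > 0, as the corrected Javadoc notes.
    short idx = dict.findEntry(data, offset, length);
    if (idx == Dictionary.NOT_IN_DICTIONARY) {
      idx = dict.addEntry(data, offset, length);  // store and get the new index
    }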
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
index db71e8c..0efb402 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
@@ -194,7 +194,6 @@ public abstract class User {
    * @param action
    * @return the result of the action
    * @throws IOException
-   * @throws InterruptedException
    */
   @SuppressWarnings({ "rawtypes", "unchecked" })
   public static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
index 4ba15ec..550088a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * scenario where the end of the buffer has been reached but there are still
  * nullable fields remaining in the {@code Struct} definition. When this
  * happens, it will produce null entries for the remaining values. For example:
+ * </p>
  * <pre>
  * StructBuilder builder = new StructBuilder()
  *     .add(OrderedNumeric.ASCENDING) // nullable
@@ -57,11 +58,10 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * Object[] val = new Object[] { BigDecimal.ONE, "foo" };
  * shorter.encode(buf1, val); // write short value with short Struct
  * buf1.setPosition(0); // reset position marker, prepare for read
- * longer.decode(buf1); // => { BigDecimal.ONE, "foo", null } ; long Struct reads implied null
+ * longer.decode(buf1); // =&gt; { BigDecimal.ONE, "foo", null } ; long Struct reads implied null
  * longer.encode(buf2, val); // write short value with long struct
- * Bytes.equals(buf1.getBytes(), buf2.getBytes()); // => true; long Struct skips writing null
+ * Bytes.equals(buf1.getBytes(), buf2.getBytes()); // =&gt; true; long Struct skips writing null
  * </pre>
- * </p>
  * <h3>Sort Order</h3>
  * <p>
  * {@code Struct} instances sort according to the composite order of their

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
index cd41658..b151b89 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
@@ -33,7 +33,7 @@ public abstract class AbstractByteRange implements ByteRange {
   // reuse objects of this class
 
   /**
-   * The array containing the bytes in this range. It will be >= length.
+   * The array containing the bytes in this range. It will be &gt;= length.
    */
   protected byte[] bytes;
 
@@ -44,7 +44,7 @@ public abstract class AbstractByteRange implements ByteRange {
   protected int offset;
 
   /**
-   * The number of bytes in the range. Offset + length must be <= bytes.length
+   * The number of bytes in the range. Offset + length must be &lt;= bytes.length
    */
   protected int length;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
index fce0d40..31fb1f5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
@@ -37,7 +37,7 @@ public class Addressing {
   public static final String HOSTNAME_PORT_SEPARATOR = ":";
 
   /**
-   * @param hostAndPort Formatted as <code>&lt;hostname> ':' &lt;port></code>
+   * @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
   * @return An InetSocketAddress instance
    */
   public static InetSocketAddress createInetSocketAddressFromHostAndPortStr(
@@ -50,7 +50,7 @@ public class Addressing {
    * @param port Server port
    * @return Returns a concatenation of <code>hostname</code> and
    * <code>port</code> in following
-   * form: <code>&lt;hostname> ':' &lt;port></code>.  For example, if hostname
+   * form: <code>&lt;hostname&gt; ':' &lt;port&gt;</code>.  For example, if hostname
    * is <code>example.org</code> and port is 1234, this method will return
    * <code>example.org:1234</code>
    */
@@ -59,7 +59,7 @@ public class Addressing {
   }
 
   /**
-   * @param hostAndPort Formatted as <code>&lt;hostname> ':' &lt;port></code>
+   * @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
    * @return The hostname portion of <code>hostAndPort</code>
    */
   public static String parseHostname(final String hostAndPort) {
@@ -71,7 +71,7 @@ public class Addressing {
   }
 
   /**
-   * @param hostAndPort Formatted as <code>&lt;hostname> ':' &lt;port></code>
+   * @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
    * @return The port portion of <code>hostAndPort</code>
    */
   public static int parsePort(final String hostAndPort) {
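
All of the methods above share the "<hostname>:<port>" convention. A minimal
sketch of that split (illustrative; Addressing has its own helpers):

    String hostAndPort = "example.org:1234";
    int idx = hostAndPort.indexOf(Addressing.HOSTNAME_PORT_SEPARATOR);
    String host = hostAndPort.substring(0, idx);                  // "example.org"
    int port = Integer.parseInt(hostAndPort.substring(idx + 1));  // 1234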

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
index d1f4f20..a22133d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * </p>
  * <ul>
  *   <li>v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
- *     when using very small files (~< 40 bytes).</li>
+ *     when using very small files (~&lt; 40 bytes).</li>
  *   <li>v2.2 - Added some helper methods for encoding/decoding directly from
  *     one file to the next. Also added a main() method to support command
  *     line encoding/decoding from one file to the next. Also added these

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
index 88b728f..d547db1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * </p>
  * <p>
  * This interface differs from ByteBuffer:
+ * </p>
+ * <ul>
  * <li>On-heap bytes only</li>
  * <li>Raw {@code byte} access only; does not encode other primitives.</li>
  * <li>Implements {@code equals(Object)}, {@code #hashCode()}, and
@@ -46,7 +48,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * <li>Can be reused in tight loops like a major compaction which can save
  * significant amounts of garbage. (Without reuse, we throw off garbage like
  * <a href="http://www.youtube.com/watch?v=lkmBH-MjZF4">this thing</a>.)</li>
- * </p>
+ * </ul>
  * <p>
  * Mutable, and always evaluates {@code #equals(Object)}, {@code #hashCode()},
  * and {@code #compareTo(ByteRange)} based on the current contents.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 5d45260..683b559 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -1377,7 +1377,7 @@ public class Bytes implements Comparable<Bytes> {
    * @param offset Offset into array at which vint begins.
    * @throws java.io.IOException e
    * @return deserialized long from buffer.
-   * @deprecated Use {@link #readAsVLong(byte[], int)} instead.
+   * @deprecated Use {@link #readAsVLong(byte[],int)} instead.
    */
   @Deprecated
   public static long readVLong(final byte [] buffer, final int offset)
@@ -1409,7 +1409,7 @@ public class Bytes implements Comparable<Bytes> {
   /**
    * @param left left operand
    * @param right right operand
-   * @return 0 if equal, < 0 if left is less than right, etc.
+   * @return 0 if equal, &lt; 0 if left is less than right, etc.
    */
   public static int compareTo(final byte [] left, final byte [] right) {
     return LexicographicalComparerHolder.BEST_COMPARER.
@@ -1425,7 +1425,7 @@ public class Bytes implements Comparable<Bytes> {
    * @param offset2 Where to start comparing in the right buffer
    * @param length1 How much to compare from the left buffer
    * @param length2 How much to compare from the right buffer
-   * @return 0 if equal, < 0 if left is less than right, etc.
+   * @return 0 if equal, &lt; 0 if left is less than right, etc.
    */
   public static int compareTo(byte[] buffer1, int offset1, int length1,
       byte[] buffer2, int offset2, int length2) {
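
The contract fixed above is the usual three-way comparison, lexicographic over the
byte contents. For example:

    int c = Bytes.compareTo(new byte[] {1, 2}, new byte[] {1, 3});
    // c < 0: {1, 2} sorts before {1, 3}; 0 on equality; > 0 otherwise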
@@ -2213,7 +2213,7 @@ public class Bytes implements Comparable<Bytes> {
   * Bytewise binary increment/decrement of long contained in byte array
    * on given amount.
    *
-   * @param value - array of bytes containing long (length <= SIZEOF_LONG)
+   * @param value - array of bytes containing long (length &lt;= SIZEOF_LONG)
   * @param amount value will be incremented by (decremented if negative)
    * @return array of bytes containing incremented long (length == SIZEOF_LONG)
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
index 9f5a88b..77acf9b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -305,7 +305,7 @@ public class ClassSize {
   /**
    * Aligns a number to 8.
    * @param num number to align to 8
-   * @return smallest number >= input that is a multiple of 8
+   * @return smallest number &gt;= input that is a multiple of 8
    */
   public static int align(int num) {
     return (int)(align((long)num));
@@ -314,7 +314,7 @@ public class ClassSize {
   /**
    * Aligns a number to 8.
    * @param num number to align to 8
-   * @return smallest number >= input that is a multiple of 8
+   * @return smallest number &gt;= input that is a multiple of 8
    */
   public static long align(long num) {
     //The 7 comes from that the alignSize is 8 which is the number of bytes
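
The comment above alludes to the usual round-up-to-a-multiple-of-8 arithmetic. One
equivalent formulation (a sketch, not necessarily the method's exact expression):

    // Add 7, then clear the low three bits: align(13) == 16, align(16) == 16.
    static long align(long num) {
      return (num + 7) & ~7L;
    }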

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
index 17ed7b7..1096a17 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
@@ -27,8 +27,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 public class DefaultEnvironmentEdge implements EnvironmentEdge {
   /**
    * {@inheritDoc}
-   * <p/>
+   * <p>
    * This implementation returns {@link System#currentTimeMillis()}
+   * </p>
    */
   @Override
   public long currentTime() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
index 7b5ecd0..482c5f0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java
@@ -45,9 +45,10 @@ public class IncrementingEnvironmentEdge implements EnvironmentEdge {
 
   /**
    * {@inheritDoc}
-   * <p/>
+   * <p>
    * This method increments a known value for the current time each time this
    * method is called. The first value is 1.
+   * </p>
    */
   @Override
   public synchronized long currentTime() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
index 8ee214d..789bd8d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java
@@ -66,11 +66,11 @@ public class JenkinsHash extends Hash {
    * <p>The best hash table sizes are powers of 2.  There is no need to do mod
    * a prime (mod is sooo slow!).  If you need less than 32 bits, use a bitmask.
    * For example, if you need only 10 bits, do
-   * <code>h = (h & hashmask(10));</code>
+   * <code>h = (h &amp; hashmask(10));</code>
    * In which case, the hash table should have hashsize(10) elements.
    *
    * <p>If you are hashing n strings byte[][] k, do it like this:
-   * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
+   * for (int i = 0, h = 0; i &lt; n; ++i) h = hash( k[i], h);
    *
    * <p>By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
    * code any way you wish, private, educational, or commercial.  It's free.
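
Assembled into compilable form, the usage notes above become roughly the following
(getInstance() and hash(byte[], int) are assumptions here, not confirmed by the
hunk):

    byte[][] k = { "foo".getBytes(), "bar".getBytes() };
    Hash hasher = JenkinsHash.getInstance();
    int h = 0;
    for (int i = 0; i < k.length; ++i) {
      h = hasher.hash(k[i], h);  // chain the previous hash as the seed
    }
    h = h & ((1 << 10) - 1);     // hashmask(10): keep only the low 10 bits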

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
index 2e69291..5398582 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
@@ -36,8 +36,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * A utility class to manage a set of locks. Each lock is identified by a String which serves
  * as a key. Typical usage is: <p>
  * class Example{
- * private final static KeyLocker<String> locker = new Locker<String>();
- * <p/>
+ * private final static KeyLocker&lt;String&gt; locker = new Locker&lt;String&gt;();
+ * </p>
+ * <p>
  * public void foo(String s){
  * Lock lock = locker.acquireLock(s);
  * try {
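
Completed into compilable form, the usage pattern sketched in the Javadoc above is
roughly the following (the Javadoc's "new Locker<String>()" presumably means
"new KeyLocker<String>()"; acquireLock is assumed to return an already-held Lock
that the caller must unlock):

    private static final KeyLocker<String> locker = new KeyLocker<String>();

    public void foo(String s) {
      java.util.concurrent.locks.Lock lock = locker.acquireLock(s);
      try {
        // work guarded by the per-key lock
      } finally {
        lock.unlock();
      }
    }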

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
index 20282ff..499e34c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
@@ -40,7 +40,8 @@ import com.google.common.annotations.VisibleForTesting;
  * Each value is encoded as one or more bytes. The first byte of the encoding,
  * its meaning, and a terse description of the bytes that follow is given by
  * the following table:
- * <table>
+ * </p>
+ * <table summary="Encodings">
  * <tr><th>Content Type</th><th>Encoding</th></tr>
  * <tr><td>NULL</td><td>0x05</td></tr>
  * <tr><td>negative infinity</td><td>0x07</td></tr>
@@ -63,7 +64,6 @@ import com.google.common.annotations.VisibleForTesting;
  * <tr><td>variable length BLOB</td><td>0x35, B</td></tr>
  * <tr><td>byte-for-byte BLOB</td><td>0x36, X</td></tr>
  * </table>
- * </p>
  *
  * <h3>Null Encoding</h3>
  * <p>
@@ -258,8 +258,8 @@ import com.google.common.annotations.VisibleForTesting;
  * values are 5 bytes in length.
  * </p>
  * <p>
- * {@code OrderedBytes} encodings are heavily influenced by the <a href="
- * http://sqlite.org/src4/doc/trunk/www/key_encoding.wiki">SQLite4 Key
+ * {@code OrderedBytes} encodings are heavily influenced by the
+ * <a href="http://sqlite.org/src4/doc/trunk/www/key_encoding.wiki">SQLite4 Key
 * Encoding</a>. Slight deviations are made in the interest of order
  * correctness and user extensibility. Fixed-width {@code Long} and
  * {@link Double} encodings are based on implementations from the now defunct
@@ -1408,6 +1408,7 @@ public class OrderedBytes {
    * -Double.MIN_VALUE &lt; -0.0 &lt; +0.0; &lt; Double.MIN_VALUE &lt; ...
    * &lt; Double.MAX_VALUE &lt; Double.POSITIVE_INFINITY &lt; Double.NaN
    * </p>
+   * <p>
    * Floating point numbers are encoded as specified in IEEE 754. A 64-bit
    * double precision float consists of a sign bit, 11-bit unsigned exponent
    * encoded in offset-1023 notation, and a 52-bit significand. The format is
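
A hedged sketch of the standard order-preserving transform for IEEE 754 doubles
(the general technique the Javadoc is describing; not necessarily OrderedBytes'
exact code):

    // Negative values: flip every bit (reverses their order).
    // Positive values: flip only the sign bit (lifts them above negatives).
    // Unsigned comparison of the results then matches double ordering.
    static long toSortableBits(double d) {
      long bits = Double.doubleToLongBits(d);
      return bits < 0 ? ~bits : bits ^ Long.MIN_VALUE;
    }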

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
index 4ec0820..8e7751d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
@@ -68,9 +68,9 @@ public class Sleeper {
   }
 
   /**
-   * Sleep for period adjusted by passed <code>startTime<code>
+   * Sleep for period adjusted by passed <code>startTime</code>
    * @param startTime Time some task started previous to now.  Time to sleep
-   * will be docked current time minus passed <code>startTime<code>.
+   * will be docked current time minus passed <code>startTime</code>.
    */
   public void sleep(final long startTime) {
     if (this.stopper.isStopped()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
index 4c14335..3ab783a 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java
@@ -102,7 +102,7 @@ public interface BaseSource {
 
   /**
    * Get the name of the context in JMX that this source will be exposed through.
-   * This is in ObjectName format. With the default context being Hadoop -> HBase
+   * This is in ObjectName format. With the default context being Hadoop -&gt; HBase
    */
   String getMetricsJmxContext();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
index f44a445..f703eef 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java
@@ -47,9 +47,10 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.io.WritableUtils;
 
 /**
+ * <p>
  * This class is created via reflection in DataBlockEncoding enum. Update the enum if class name or
  * package changes.
- * <p/>
+ * </p>
  * PrefixTreeDataBlockEncoder implementation of DataBlockEncoder. This is the primary entry point
  * for PrefixTree encoding and decoding. Encoding is delegated to instances of
  * {@link PrefixTreeEncoder}, and decoding is delegated to instances of

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
index 73e8ab4..a4b4c353 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
@@ -63,8 +63,9 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   }
 
   /**
+   * <p>
    * Currently unused.
-   * <p/>
+   * </p>
    * TODO performance leak. should reuse the searchers. hbase does not currently have a hook where
    * this can be called
    */
@@ -110,12 +111,13 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   }
 
   /**
+   * <p>
    * Currently unused.
-   * <p/>
+   * </p><p>
    * A nice, lightweight reference, though the underlying cell is transient. This method may return
    * the same reference to the backing PrefixTreeCell repeatedly, while other implementations may
    * return a different reference for each Cell.
-   * <p/>
+   * </p>
    * The goal will be to transition the upper layers of HBase, like Filters and KeyValueHeap, to
    * use this method instead of the getKeyValue() methods above.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
index effad57..f0b249f 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java
@@ -25,10 +25,11 @@ import java.util.concurrent.LinkedBlockingQueue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
+ * <p>
  * Pools PrefixTreeArraySearcher objects. Each Searcher can consist of hundreds or thousands of
  * objects and 1 is needed for each HFile during a Get operation. With tens of thousands of
  * Gets/second, reusing these searchers may save a lot of young gen collections.
- * <p/>
+ * </p>
  * Alternative implementation would be a ByteBufferSearcherPool (not implemented yet).
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
index ec54c2a..eb0e41f 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
@@ -28,10 +28,11 @@ import org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher;
 import com.google.common.primitives.UnsignedBytes;
 
 /**
+ * <p>
  * Searcher extends the capabilities of the Scanner + ReversibleScanner to add the ability to
  * position itself on a requested Cell without scanning through cells before it. The PrefixTree is
  * set up to be a Trie of rows, so finding a particular row is extremely cheap.
- * <p/>
+ * </p>
  * Once it finds the row, it does a binary search through the cells inside the row, which is not as
  * fast as the trie search, but faster than iterating through every cell like existing block
  * formats
@@ -309,8 +310,8 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
   /****************** complete seek when token mismatch ******************/
 
   /**
-   * @param searcherIsAfterInputKey <0: input key is before the searcher's position<br/>
-   *          >0: input key is after the searcher's position
+   * @param searcherIsAfterInputKey &lt;0: input key is before the searcher's position<br/>
+   *          &gt;0: input key is after the searcher's position
    */
   protected CellScannerPosition fixRowTokenMissReverse(int searcherIsAfterInputKey) {
     if (searcherIsAfterInputKey < 0) {//searcher position is after the input key, so back up
@@ -337,8 +338,8 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
   }
 
   /**
-   * @param searcherIsAfterInputKey <0: input key is before the searcher's position<br/>
-   *                   >0: input key is after the searcher's position
+   * @param searcherIsAfterInputKey &lt;0: input key is before the searcher's position<br>
+   *                   &gt;0: input key is after the searcher's position
    */
   protected CellScannerPosition fixRowTokenMissForward(int searcherIsAfterInputKey) {
     if (searcherIsAfterInputKey < 0) {//searcher position is after the input key

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
index 3e4b75c..926cf30 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java
@@ -46,12 +46,12 @@ import org.apache.hadoop.io.WritableUtils;
 /**
  * This is the primary class for converting a CellOutputStream into an encoded byte[]. As Cells are
  * added they are completely copied into the various encoding structures. This is important because
- * usually the cells being fed in during compactions will be transient.<br/>
- * <br/>
- * Usage:<br/>
- * 1) constructor<br/>
- * 4) append cells in sorted order: write(Cell cell)<br/>
- * 5) flush()<br/>
+ * usually the cells being fed in during compactions will be transient.<br>
+ * <br>
+ * Usage:<br>
+ * 1) constructor<br>
+ * 4) append cells in sorted order: write(Cell cell)<br>
+ * 5) flush()<br>
  */
 @InterfaceAudience.Private
 public class PrefixTreeEncoder implements CellOutputStream {
@@ -391,10 +391,11 @@ public class PrefixTreeEncoder implements CellOutputStream {
   }
 
   /**
+   * <p>
    * The following "compile" methods do any intermediate work necessary to transform the cell
    * fragments collected during the writing phase into structures that are ready to write to the
    * outputStream.
-   * <p/>
+   * </p>
    * The family and qualifier treatment is almost identical, as is timestamp and mvccVersion.
    */
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
index c1eb03d..467e7ad 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java
@@ -32,14 +32,17 @@ import org.apache.hadoop.hbase.util.vint.UFIntTool;
 import org.apache.hadoop.hbase.util.vint.UVIntTool;
 
 /**
+ * <p>
  * Column nodes can be either family nodes or qualifier nodes, as both sections encode similarly.
  * The family and qualifier sections of the data block are made of 1 or more of these nodes.
- * <p/>
- * Each node is composed of 3 sections:<br/>
+ * </p>
+ * Each node is composed of 3 sections:<br>
+ * <ul>
  * <li>tokenLength: UVInt (normally 1 byte) indicating the number of token bytes
  * <li>token[]: the actual token bytes
  * <li>parentStartPosition: the offset of the next node from the start of the family or qualifier
  * section
+ * </ul>
  */
 @InterfaceAudience.Private
 public class ColumnNodeWriter{

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
index 3ceae63..b30daf6 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java
@@ -34,9 +34,10 @@ import org.apache.hadoop.hbase.util.vint.UFIntTool;
 import com.google.common.collect.Lists;
 
 /**
+ * <p>
  * Takes the tokenized family or qualifier data and flattens it into a stream of bytes. The family
  * section is written after the row section, and qualifier section after family section.
- * <p/>
+ * </p>
  * The family and qualifier tries, or "column tries", are structured differently than the row trie.
  * The trie cannot be reassembled without external data about the offsets of the leaf nodes, and
  * these external pointers are stored in the nubs and leaves of the row trie. For each cell in a
@@ -45,12 +46,13 @@ import com.google.common.collect.Lists;
  * comprises the column name. To assemble the column name, the trie is traversed in reverse (right
  * to left), with the rightmost tokens pointing to the start of their "parent" node which is the
  * node to the left.
- * <p/>
+ * <p>
  * This choice was made to reduce the size of the column trie by storing the minimum amount of
  * offset data. As a result, to find a specific qualifier within a row, you must do a binary search
  * of the column nodes, reassembling each one as you search. Future versions of the PrefixTree might
  * encode the columns in both a forward and reverse trie, which would convert binary searches into
  * more efficient trie searches which would be beneficial for wide rows.
+ * </p>
  */
 @InterfaceAudience.Private
 public class ColumnSectionWriter {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
index 5c184bf..35f264b 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java
@@ -188,8 +188,9 @@ public class RowNodeWriter{
    * offsets into the timestamp/column data structures that are written in the middle of the block.
    * We use {@link UFIntTool} to encode these indexes/offsets to allow random access during a binary
    * search of a particular column/timestamp combination.
-   * <p/>
+   * <p>
    * Branch nodes will not have any data in these sections.
+   * </p>
    */
 
   protected void writeFamilyNodeOffsets(OutputStream os) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
index 75a11ad..f44017b 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java
@@ -31,10 +31,12 @@ import com.google.common.collect.Lists;
 
 /**
  * Data structure used in the first stage of PrefixTree encoding:
+ * <ul>
  * <li>accepts a sorted stream of ByteRanges
  * <li>splits them into a set of tokens, each held by a {@link TokenizerNode}
  * <li>connects the TokenizerNodes via standard java references
  * <li>keeps a pool of TokenizerNodes and a reusable byte[] for holding all token content
+ * </ul>
  * <p><br>
  * Mainly used for turning Cell rowKeys into a trie, but also used for family and qualifier
  * encoding.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
index e51d5be..7da78a7 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
@@ -35,12 +35,12 @@ import com.google.common.collect.Lists;
  * Individual node in a Trie structure.  Each node is one of 3 types:
  * <li>Branch: an internal trie node that may have a token and must have multiple children, but does
  * not represent an actual input byte[], hence its numOccurrences is 0
- * <li>Leaf: a node with no children and where numOccurrences is >= 1.  It's token represents the
+ * <li>Leaf: a node with no children and where numOccurrences is &gt;= 1.  Its token represents the
  * last bytes in the input byte[]s.
  * <li>Nub: a combination of a branch and leaf.  Its token represents the last bytes of input
- * byte[]s and has numOccurrences >= 1, but it also has child nodes which represent input byte[]s
+ * byte[]s and has numOccurrences &gt;= 1, but it also has child nodes which represent input byte[]s
 * that add bytes to this node's input byte[].
- * <br/><br/>
+ * <br><br>
  * Example inputs (numInputs=7):
  * 0: AAA
  * 1: AAA
@@ -49,13 +49,13 @@ import com.google.common.collect.Lists;
  * 4: AAB
  * 5: AABQQ
  * 6: AABQQ
- * <br/><br/>
+ * <br><br>
  * Resulting TokenizerNodes:
- * AA <- branch, numOccurrences=0, tokenStartOffset=0, token.length=2
- * A  <- leaf, numOccurrences=2, tokenStartOffset=2, token.length=1
- * B  <- nub, numOccurrences=3, tokenStartOffset=2, token.length=1
- * QQ <- leaf, numOccurrences=2, tokenStartOffset=3, token.length=2
- * <br/><br/>
+ * AA &lt;- branch, numOccurrences=0, tokenStartOffset=0, token.length=2
+ * A  &lt;- leaf, numOccurrences=2, tokenStartOffset=2, token.length=1
+ * B  &lt;- nub, numOccurrences=3, tokenStartOffset=2, token.length=1
+ * QQ &lt;- leaf, numOccurrences=2, tokenStartOffset=3, token.length=2
+ * <br><br>
  * numInputs == 7 == sum(numOccurrences) == 0 + 2 + 3 + 2
  */
 @InterfaceAudience.Private
@@ -236,13 +236,15 @@ public class TokenizerNode{
   /**
    * Called when we need to convert a leaf node into a branch with 2 leaves. Comments inside the
    * method assume we have token BAA starting at tokenStartOffset=0 and are adding BOO. The output
-   * will be 3 nodes:<br/>
-   * <li>1: B <- branch
-   * <li>2: AA <- leaf
-   * <li>3: OO <- leaf
+   * will be 3 nodes:<br>
+   * <ul>
+   * <li>1: B &lt;- branch
+   * <li>2: AA &lt;- leaf
+   * <li>3: OO &lt;- leaf
+   * </ul>
    *
-   * @param numTokenBytesToRetain => 1 (the B)
-   * @param bytes => BOO
+   * @param numTokenBytesToRetain =&gt; 1 (the B)
+   * @param bytes =&gt; BOO
    */
   protected void split(int numTokenBytesToRetain, final ByteRange bytes) {
     int childNodeDepth = nodeDepth;

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
index 7e83457..a3ae097 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
@@ -33,19 +33,22 @@ public interface CellSearcher extends ReversibleCellScanner {
   void resetToBeforeFirstEntry();
 
   /**
+   * <p>
    * Do everything within this scanner's power to find the key. Look forward and backwards.
-   * <p/>
+   * </p>
+   * <p>
    * Abort as soon as we know it can't be found, possibly leaving the Searcher in an invalid state.
-   * <p/>
+   * </p>
    * @param key position the CellScanner exactly on this key
    * @return true if the cell existed and getCurrentCell() holds a valid cell
    */
   boolean positionAt(Cell key);
 
   /**
+   * <p>
    * Same as positionAt(..), but go to the extra effort of finding the previous key if there's no
    * exact match.
-   * <p/>
+   * </p>
    * @param key position the CellScanner on this key or the closest cell before
    * @return AT if exact match<br/>
    *         BEFORE if on last cell before key<br/>
@@ -54,9 +57,10 @@ public interface CellSearcher extends ReversibleCellScanner {
   CellScannerPosition positionAtOrBefore(Cell key);
 
   /**
+   * <p>
    * Same as positionAt(..), but go to the extra effort of finding the next key if there's no exact
    * match.
-   * <p/>
+   * </p>
    * @param key position the CellScanner on this key or the closest cell after
    * @return AT if exact match<br/>
    *         AFTER if on first cell after key<br/>
@@ -65,43 +69,47 @@ public interface CellSearcher extends ReversibleCellScanner {
   CellScannerPosition positionAtOrAfter(Cell key);
 
   /**
+   * <p>
    * Note: Added for backwards compatibility with
    * {@link org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek}
-   * <p/>
+   * </p><p>
    * Look for the key, but only look after the current position. Probably not needed for an
    * efficient tree implementation, but is important for implementations without random access such
    * as unencoded KeyValue blocks.
-   * <p/>
+   * </p>
    * @param key position the CellScanner exactly on this key
    * @return true if getCurrent() holds a valid cell
    */
   boolean seekForwardTo(Cell key);
 
   /**
+   * <p>
    * Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no
    * exact match.
-   * <p/>
+   * </p>
    * @param key
-   * @return AT if exact match<br/>
-   *         AFTER if on first cell after key<br/>
+   * @return AT if exact match<br>
+   *         AFTER if on first cell after key<br>
    *         AFTER_LAST if key was after the last cell in this scanner's scope
    */
   CellScannerPosition seekForwardToOrBefore(Cell key);
 
   /**
+   * <p>
    * Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no
    * exact match.
-   * <p/>
+   * </p>
    * @param key
-   * @return AT if exact match<br/>
-   *         AFTER if on first cell after key<br/>
+   * @return AT if exact match<br>
+   *         AFTER if on first cell after key<br>
    *         AFTER_LAST if key was after the last cell in this scanner's scope
    */
   CellScannerPosition seekForwardToOrAfter(Cell key);
 
   /**
+   * <p>
    * Note: This may not be appropriate to have in the interface.  Need to investigate.
-   * <p/>
+   * </p>
    * Position the scanner in an invalid state after the last cell: CellScannerPosition.AFTER_LAST.
    * This is used by tests and for handling certain edge cases.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
index 3823e7c..c15429b 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java
@@ -35,7 +35,7 @@ public interface ReversibleCellScanner extends CellScanner {
   /**
    * Try to position the scanner one Cell before the current position.
    * @return true if the operation was successful, meaning getCurrentCell() will return a valid
-   *         Cell.<br/>
+   *         Cell.<br>
    *         false if there were no previous cells, meaning getCurrentCell() will return null.
    *         Scanner position will be
    *         {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST}
@@ -46,7 +46,7 @@ public interface ReversibleCellScanner extends CellScanner {
    * Try to position the scanner in the row before the current row.
    * @param endOfRow true for the last cell in the previous row; false for the first cell
    * @return true if the operation was successful, meaning getCurrentCell() will return a valid
-   *         Cell.<br/>
+   *         Cell.<br>
    *         false if there were no previous cells, meaning getCurrentCell() will return null.
    *         Scanner position will be
    *         {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST}

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
index fc7c107..a3da9f0 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * This class converts between positive ints and 1-4 bytes that represent the int.  All input ints
  * must be positive.  Max values stored in N bytes are:
  *
- * N=1: 2^8  =>           256
- * N=2: 2^16 =>        65,536
- * N=3: 2^24 =>    16,777,216
- * N=4: 2^31 => 2,147,483,648 (Integer.MAX_VALUE)
+ * N=1: 2^8  =&gt;           256
+ * N=2: 2^16 =&gt;        65,536
+ * N=3: 2^24 =&gt;    16,777,216
+ * N=4: 2^31 =&gt; 2,147,483,648 (Integer.MAX_VALUE)
  *
  * This was created to get most of the memory savings of a variable length integer when encoding
  * an array of input integers, but to fix the number of bytes for each integer to the number needed
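
A sketch of the fixed-width idea documented above: every value is written with the
same byte count (big-endian order is an assumption for illustration; these are not
UFIntTool's actual signatures):

    static byte[] encode(long value, int width) {
      byte[] out = new byte[width];
      for (int i = width - 1; i >= 0; --i) {
        out[i] = (byte) value;  // lowest byte goes last (big-endian)
        value >>>= 8;
      }
      return out;               // encode(65535, 2) -> {0xFF, 0xFF}
    }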

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
index dd4095b..aeebd2c 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java
@@ -42,7 +42,7 @@ public class UVIntTool {
   public static final byte[]
     MAX_VALUE_BYTES = new byte[] { -1, -1, -1, -1, 7 };
 
-  /********************* int -> bytes **************************/
+  /********************* int -&gt; bytes **************************/
 
   public static int numBytes(int in) {
     if (in == 0) {
@@ -79,7 +79,7 @@ public class UVIntTool {
     return numBytes;
   }
 
-  /******************** bytes -> int **************************/
+  /******************** bytes -&gt; int **************************/
 
   public static int getInt(byte[] bytes) {
     return getInt(bytes, 0);

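MAX_VALUE_BYTES above ({ -1, -1, -1, -1, 7 }) implies the usual little-endian 7-bits-per-byte encoding, where a set high bit means "more bytes follow". A hedged sketch of that scheme, not UVIntTool's own code:

    import java.io.ByteArrayOutputStream;

    public final class VarIntSketch {
      // Emit 7-bit groups low-to-high; set the high bit on all but the last.
      static byte[] encode(int value) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        while ((value & ~0x7F) != 0) {
          out.write((value & 0x7F) | 0x80);  // continuation bit set
          value >>>= 7;
        }
        out.write(value);                    // final byte, high bit clear
        return out.toByteArray();
      }

      // Accumulate 7-bit groups until a byte with a clear high bit.
      static int decode(byte[] bytes, int offset) {
        int value = 0;
        for (int i = 0; ; i++) {
          byte b = bytes[offset + i];
          value |= (b & 0x7F) << (7 * i);
          if ((b & 0x80) == 0) {
            return value;
          }
        }
      }
    }

As a sanity check, encode(Integer.MAX_VALUE) yields 0xFF 0xFF 0xFF 0xFF 0x07, exactly the five bytes in MAX_VALUE_BYTES.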
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
index b2437a8..b55e0f6 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java
@@ -43,7 +43,7 @@ public class UVLongTool{
     MAX_VALUE_BYTES = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, 127 };
 
 
-  /********************* long -> bytes **************************/
+  /********************* long -&gt; bytes **************************/
 
   public static int numBytes(long in) {// do a check for illegal arguments if not protected
     if (in == 0) {
@@ -77,7 +77,7 @@ public class UVLongTool{
     return numBytes;
   }
 
-  /******************** bytes -> long **************************/
+  /******************** bytes -&gt; long **************************/
 
   public static long getLong(byte[] bytes) {
     return getLong(bytes, 0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 00a12eb..13de210 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -50,9 +50,9 @@ import com.google.protobuf.ByteString;
  * the return is a set of sub-procedures or null in case the procedure doesn't
  * have sub-procedures. Once the sub-procedures are successfully completed
  * the execute() method is called again, you should think at it as a stack:
- *  -> step 1
- *  ---> step 2
- *  -> step 1
+ *  -&gt; step 1
+ *  ---&gt; step 2
+ *  -&gt; step 1
  *
  * rollback() is called when the procedure or one of the sub-procedures is failed.
  * the rollback step is supposed to cleanup the resources created during the

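The "-> step 1 / ---> step 2 / -> step 1" trace above is a stack discipline: a parent is re-invoked once its sub-procedures finish. A toy driver makes that explicit; the Step type here is hypothetical and this is not the procedure2 framework itself.

    import java.util.ArrayDeque;
    import java.util.Deque;

    public final class StackDriverSketch {
      interface Step {
        // Return sub-steps to run first, or null when done; like execute()
        // above, run() is called again after the sub-steps complete.
        Step[] run();
      }

      static void drive(Step root) {
        Deque<Step> stack = new ArrayDeque<>();
        stack.push(root);
        while (!stack.isEmpty()) {
          Step[] subs = stack.peek().run();
          if (subs == null) {
            stack.pop();           // the step is finished; its parent resumes
          } else {
            for (Step s : subs) {
              stack.push(s);       // "---> step 2" runs before step 1 ends
            }
          }
        }
      }
    }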
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
index bcb0424..636a037 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProc
 
 /**
  * A SequentialProcedure describes one step in a procedure chain.
- *   -> Step 1 -> Step 2 -> Step 3
+ *   -&gt; Step 1 -&gt; Step 2 -&gt; Step 3
  *
  * The main difference from a base Procedure is that the execute() of a
  * SequentialProcedure will be called only once, there will be no second
@@ -79,4 +79,4 @@ public abstract class SequentialProcedure<TEnvironment> extends Procedure<TEnvir
     SequentialProcedureData data = SequentialProcedureData.parseDelimitedFrom(stream);
     executed = data.getExecuted();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
index 933a6e2..4ffd590 100644
--- a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
+++ b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java
@@ -37,6 +37,8 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
 
   /**
    * Wraps a byte array in a {@link ByteString} without copying it.
+   * @param array array to be wrapped
+   * @return wrapped array
    */
   public static ByteString wrap(final byte[] array) {
     return new LiteralByteString(array);
@@ -44,6 +46,10 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
 
   /**
    * Wraps a subset of a byte array in a {@link ByteString} without copying it.
+   * @param array array to be wrapped
+   * @param offset from
+   * @param length length
+   * @return wrapped array
    */
   public static ByteString wrap(final byte[] array, int offset, int length) {
     return new BoundedByteString(array, offset, length);

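A short usage sketch for the two wrap() overloads documented above. Because no copy is taken, later writes to the array stay visible through the ByteString; that aliasing is both the point and the hazard.

    import com.google.protobuf.ByteString;
    import com.google.protobuf.HBaseZeroCopyByteString;

    public final class ZeroCopyWrapSketch {
      public static void main(String[] args) {
        byte[] payload = { 1, 2, 3, 4 };
        ByteString whole = HBaseZeroCopyByteString.wrap(payload);
        ByteString slice = HBaseZeroCopyByteString.wrap(payload, 1, 2); // {2, 3}
        payload[1] = 9;                       // no copy: both wrappers see this
        System.out.println(slice.byteAt(0));  // prints 9
      }
    }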
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
index 23d26dc..d1216f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
@@ -99,7 +99,7 @@ public interface InterProcessLock {
   /**
    * Visits the locks (both held and attempted) of this type with the given
    * {@link MetadataHandler}.
-   * @throws InterruptedException If there is an unrecoverable error
+   * @throws IOException If there is an unrecoverable error
    */
   void visitLocks(MetadataHandler handler) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
index 880875f..3258cbb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
@@ -38,7 +38,7 @@ public class HFileArchiveTableMonitor {
    * Set the tables to be archived. Internally adds each table and attempts to
    * register it.
    * <p>
-   * <b>Note: All previous tables will be removed in favor of these tables.<b>
+   * <b>Note: All previous tables will be removed in favor of these tables.</b>
    * @param tables add each of the tables to be archived.
    */
   public synchronized void setArchiveTables(List<String> tables) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
index 31746b6..42da0ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  *  {@link org.apache.hadoop.hbase.client.Put}.
  * <p>Does <b>NOT</b> attempt the
  *  {@link org.apache.hadoop.hbase.client.Put} multiple times, 
- *  since the constraint <it>should</it> fail every time for 
+ *  since the constraint <b>should</b> fail every time for
  *  the same {@link org.apache.hadoop.hbase.client.Put} (it should be
  * idempotent).
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
index 9bffc5c..6729f7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java
@@ -19,7 +19,6 @@
 /**
  * Restrict the domain of a data attribute, often times to fulfill business rules/requirements.
  *
- <p>
  <h2> Table of Contents</h2>
  <ul>
  <li><a href="#overview">Overview</a></li>
@@ -27,7 +26,6 @@
  <li><a href="#caveats">Caveats</a></li>
  <li><a href="#usage">Example Usage</a></li>
  </ul>
- </p>
 
  <h2><a name="overview">Overview</a></h2>
  Constraints are used to enforce business rules in a database.
@@ -127,9 +125,9 @@
  public class IntegerConstraint extends BaseConstraint {
  public void check(Put p) throws ConstraintException {
 
- Map&ltbyte[], List&ltKeyValue&gt&gt familyMap = p.getFamilyMap();
+ Map&lt;byte[], List&lt;KeyValue&gt;&gt; familyMap = p.getFamilyMap();
 
- for (List &ltKeyValue&gt kvs : familyMap.values()) {
+ for (List &lt;KeyValue&gt; kvs : familyMap.values()) {
  for (KeyValue kv : kvs) {
 
  // just make sure that we can actually pull out an int

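To round out the IntegerConstraint example above: a constraint is attached to a table through the Constraints helper in this package. A hedged sketch, assuming the add(HTableDescriptor, Class) overload; exact signatures may differ across versions.

    import java.io.IOException;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.constraint.Constraints;

    public final class AddConstraintSketch {
      static HTableDescriptor describeTable() throws IOException {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
        Constraints.add(desc, IntegerConstraint.class);  // enable the check
        return desc;  // create the table with this descriptor as usual
      }
    }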
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 917df5b..67fe96a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -137,7 +137,7 @@ public interface SplitLogManagerCoordination {
    * It removes recovering regions from Coordination
    * @param serverNames servers which are just recovered
    * @param isMetaRecovery whether current recovery is for the meta region on
-   *          <code>serverNames<code>
+   *          <code>serverNames</code>
    */
   void removeRecoveringRegions(Set<String> serverNames, Boolean isMetaRecovery) throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 6619eaa..7925cb0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -290,7 +290,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
    * region server hosting the region can allow reads to the recovered region
    * @param recoveredServerNameSet servers which are just recovered
    * @param isMetaRecovery whether current recovery is for the meta region on
-   *          <code>serverNames<code>
+   *          <code>serverNames</code>
    */
   @Override
   public void removeRecoveringRegions(final Set<String> recoveredServerNameSet,
@@ -684,8 +684,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 
   /**
    * ZooKeeper implementation of
-   * {@link org.apache.hadoop.hbase.coordination.
-   * SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
+   * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
    */
   @Override
   public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
index 81c933b..cc78626 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java
@@ -51,11 +51,11 @@ import com.google.protobuf.Service;
  * {@link ColumnInterpreter} is used to interpret column value. This class is
  * parameterized with the following (these are the types with which the {@link ColumnInterpreter}
  * is parameterized, and for more description on these, refer to {@link ColumnInterpreter}):
- * @param <T> Cell value data type
- * @param <S> Promoted data type
- * @param <P> PB message that is used to transport initializer specific bytes
- * @param <Q> PB message that is used to transport Cell (<T>) instance
- * @param <R> PB message that is used to transport Promoted (<S>) instance
+ * @param T Cell value data type
+ * @param S Promoted data type
+ * @param P PB message that is used to transport initializer specific bytes
+ * @param Q PB message that is used to transport Cell (&lt;T&gt;) instance
+ * @param R PB message that is used to transport Promoted (&lt;S&gt;) instance
  */
 @InterfaceAudience.Private
 public class AggregateImplementation<T, S, P extends Message, Q extends Message, R extends Message> 
@@ -229,7 +229,6 @@ extends AggregateService implements CoprocessorService, Coprocessor {
   /**
    * Gives the row count for the given column family and column qualifier, in
    * the given row range as defined in the Scan object.
-   * @throws IOException
    */
   @Override
   public void getRowNum(RpcController controller, AggregateRequest request,

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
index 3e5acc2..e771a92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
@@ -52,14 +52,14 @@ import com.google.protobuf.Service;
  *
  * Defines a protocol to perform multi row transactions.
  * See {@link MultiRowMutationEndpoint} for the implementation.
- * </br>
+ * <br>
  * See
  * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)}
  * for details and limitations.
- * </br>
+ * <br>
  * Example:
- * <code><pre>
- * List<Mutation> mutations = ...;
+ * <code>
+ * List&lt;Mutation&gt; mutations = ...;
  * Put p1 = new Put(row1);
  * Put p2 = new Put(row2);
  * ...
@@ -73,7 +73,7 @@ import com.google.protobuf.Service;
  *    MultiRowMutationService.newBlockingStub(channel);
  * MutateRowsRequest mrm = mrmBuilder.build();
  * service.mutateRows(null, mrm);
- * </pre></code>
+ * </code>
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 507a1bb..93eb5f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -344,7 +344,7 @@ public interface RegionObserver extends Coprocessor {
    * (e.getRegion() returns the parent region)
    * @throws IOException if an error occurred on the coprocessor
    * @deprecated Use preSplit(
-   *    final ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow)
+   *    final ObserverContext&lt;RegionCoprocessorEnvironment&gt; c, byte[] splitRow)
    */
   @Deprecated
   void preSplit(final ObserverContext<RegionCoprocessorEnvironment> c) throws IOException;
@@ -1068,7 +1068,8 @@ public interface RegionObserver extends Coprocessor {
    * <li>
    * <code>boolean filterRow()</code> returning true</li>
    * <li>
-   * <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from the passed List</li>
+   * <code>void filterRow(List&lt;KeyValue&gt; kvs)</code> removing all the kvs
+   * from the passed List</li>
    * </ol>
    * @param c the environment provided by the region server
    * @param s the scanner
@@ -1095,7 +1096,8 @@ public interface RegionObserver extends Coprocessor {
    * <li>
    * <code>boolean filterRow()</code> returning true</li>
    * <li>
-   * <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from the passed List</li>
+   * <code>void filterRow(List&lt;KeyValue&gt; kvs)</code> removing all the kvs from
+   * the passed List</li>
    * </ol>
    * @param c the environment provided by the region server
    * @param s the scanner

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
index c4777e1..d175aff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
@@ -62,7 +62,7 @@ when the corresponding events happen. The master transitions regions
 through the following states:
 <p>
 &nbsp;&nbsp;&nbsp;
-unassigned -> pendingOpen -> open -> pendingClose -> closed.
+unassigned -&gt; pendingOpen -&gt; open -&gt; pendingClose -&gt; closed.
 <p>
 Coprocessors have opportunity to intercept and handle events in
 pendingOpen, open, and pendingClose states.
@@ -75,7 +75,7 @@ can piggyback or fail this process.
 <p>
 <ul>
   <li>preOpen, postOpen: Called before and after the region is reported as
- online to the master.</li><p>
+ online to the master.</li>
 </ul>
 <p>
 <h3>Open</h3>
@@ -85,9 +85,9 @@ split, etc.). Coprocessors can piggyback administrative actions via:
 <p>
 <ul>
   <li>preFlush, postFlush: Called before and after the memstore is flushed
-  into a new store file.</li><p>
-  <li>preCompact, postCompact: Called before and after compaction.</li><p>
-  <li>preSplit, postSplit: Called after the region is split.</li><p>
+  into a new store file.</li>
+  <li>preCompact, postCompact: Called before and after compaction.</li>
+  <li>preSplit, postSplit: Called after the region is split.</li>
 </ul>
 <p>
 <h3>PendingClose</h3>
@@ -99,7 +99,7 @@ an indication to this effect will be passed as an argument.
 <p>
 <ul>
   <li>preClose and postClose: Called before and after the region is
-  reported as closed to the master.</li><p>
+  reported as closed to the master.</li>
 </ul>
 <p>
 
@@ -109,23 +109,23 @@ observe and mediate client actions on the region:
 <p>
 <ul>
   <li>preGet, postGet: Called before and after a client makes a Get
-  request.</li><p>
+  request.</li>
   <li>preExists, postExists: Called before and after the client tests
-  for existence using a Get.</li><p>
+  for existence using a Get.</li>
   <li>prePut and postPut: Called before and after the client stores a value.
-  </li><p>
+  </li>
   <li>preDelete and postDelete: Called before and after the client
-  deletes a value.</li><p>
+  deletes a value.</li>
   <li>preScannerOpen postScannerOpen: Called before and after the client
-  opens a new scanner.</li><p>
+  opens a new scanner.</li>
   <li>preScannerNext, postScannerNext: Called before and after the client
-  asks for the next row on a scanner.</li><p>
+  asks for the next row on a scanner.</li>
   <li>preScannerClose, postScannerClose: Called before and after the client
-  closes a scanner.</li><p>
+  closes a scanner.</li>
   <li>preCheckAndPut, postCheckAndPut: Called before and after the client
-  calls checkAndPut().</li><p>
+  calls checkAndPut().</li>
   <li>preCheckAndDelete, postCheckAndDelete: Called before and after the client
-  calls checkAndDelete().</li><p>
+  calls checkAndDelete().</li>
 </ul>
 You can also extend abstract class <code>BaseRegionObserverCoprocessor</code>
 which
@@ -245,7 +245,7 @@ recognize and load it.
 </div>
 <p>
 &lt;path&gt; must point to a jar, can be on any filesystem supported by the
-Hadoop </code>FileSystem</code> object.
+Hadoop <code>FileSystem</code> object.
 <p>
 &lt;class&gt; is the coprocessor implementation class. A jar can contain
 more than one coprocessor implementation, but only one can be specified
@@ -270,7 +270,7 @@ policy implementations, perhaps) ahead of observers.
     ":" + Coprocessor.Priority.USER);
   HBaseAdmin admin = new HBaseAdmin(this.conf);
   admin.createTable(htd);
-
+</pre></blockquote>
 <h3>Chain of RegionObservers</h3>
 As described above, multiple coprocessors can be loaded at one region at the
 same time. In case of RegionObserver, you can have more than one
@@ -278,8 +278,6 @@ RegionObservers register to one same hook point, i.e, preGet(), etc.
 When a region reach the
 hook point, the framework will invoke each registered RegionObserver by the
 order of assigned priority.
-
-</pre></blockquote>
 </div>
 
 */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
index 750f87c..746c59b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
@@ -38,7 +38,8 @@ public class TimeoutException extends Exception {
    * Exception indicating that an operation attempt has timed out
    * @param start time the operation started (ms since epoch)
    * @param end time the timeout was triggered (ms since epoch)
-   * @param expected expected amount of time for the operation to complete (ms) (ideally, expected <= end-start)
+   * @param expected expected amount of time for the operation to complete (ms)
+   *                 (ideally, expected &lt;= end-start)
    */
   public TimeoutException(String sourceName, long start, long end, long expected) {
     super("Timeout elapsed! Source:" + sourceName + " Start:" + start + ", End:" + end

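The constructor above takes the raw timing data and formats the message itself. A minimal usage sketch; the source name "example-op" is made up.

    import org.apache.hadoop.hbase.errorhandling.TimeoutException;

    public final class TimeoutSketch {
      static void checkElapsed(long start, long expected) throws TimeoutException {
        long end = System.currentTimeMillis();
        if (end - start > expected) {
          // sourceName, start (ms since epoch), end (ms since epoch), expected (ms)
          throw new TimeoutException("example-op", start, end, expected);
        }
      }
    }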
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 84c3548..4ce2d94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -94,9 +94,9 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
  * Create a Jetty embedded server to answer http requests. The primary goal
  * is to serve up status information for the server.
  * There are three contexts:
- *   "/logs/" -> points to the log directory
- *   "/static/" -> points to common static files (src/webapps/static)
- *   "/" -> the jsp server code from (src/webapps/<name>)
+ *   "/logs/" -&gt; points to the log directory
+ *   "/static/" -&gt; points to common static files (src/webapps/static)
+ *   "/" -&gt; the jsp server code from (src/webapps/<name>)
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -425,7 +425,7 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
+   * The jsp scripts are taken from src/webapps/&lt;name&gt;.
    * @param name The name of the server
    * @param port The port to use on the server
    * @param findPort whether the server should start at the given port and
@@ -1108,13 +1108,14 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Checks the user has privileges to access to instrumentation servlets.
-   * <p/>
+   * <p>
    * If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
    * (default value) it always returns TRUE.
-   * <p/>
+   * </p><p>
    * If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
    * it will check that if the current user is in the admin ACLS. If the user is
    * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+   * </p>
    *
    * @param servletContext the servlet context.
    * @param request the servlet request.

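The admin check described above hinges on a single boolean property. A sketch of enabling it through the standard Hadoop Configuration API:

    import org.apache.hadoop.conf.Configuration;

    public final class InstrumentationAclSketch {
      static Configuration secured() {
        Configuration conf = new Configuration();
        // With this set, only users in the admin ACLs may reach the
        // instrumentation servlets (e.g. /jmx).
        conf.setBoolean("hadoop.security.instrumentation.requires.admin", true);
        return conf;
      }
    }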
http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
index 7549a3e..e4a971a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java
@@ -16,9 +16,8 @@
  * limitations under the License.
  */
 /**
- * </ul>
  * <p>
- * Copied from hadoop source code.<br/>
+ * Copied from hadoop source code.<br>
  * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why.
  * </p>
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
index 1c5a593..3caf67f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
@@ -91,7 +91,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 public class FileLink {
   private static final Log LOG = LogFactory.getLog(FileLink.class);
 
-  /** Define the Back-reference directory name prefix: .links-<hfile>/ */
+  /** Define the Back-reference directory name prefix: .links-&lt;hfile&gt;/ */
   public static final String BACK_REFERENCES_DIRECTORY_PREFIX = ".links-";
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index ff33951..c17720c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -401,7 +401,6 @@ public class HFileLink extends FileLink {
    * @param rootDir root hbase directory
    * @param linkRefPath Link Back Reference path
    * @return full path of the referenced hfile
-   * @throws IOException on unexpected error.
    */
   public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
     Pair<TableName, String> p = parseBackReferenceName(linkRefPath.getName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
index fc5bd5d..344d496 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
 /**
  * WALLink describes a link to a WAL.
  *
- * An wal can be in /hbase/.logs/<server>/<wal>
- * or it can be in /hbase/.oldlogs/<wal>
+ * A WAL can be in /hbase/.logs/&lt;server&gt;/&lt;wal&gt;
+ * or it can be in /hbase/.oldlogs/&lt;wal&gt;
  *
  * The link checks first in the original path,
  * if it is not present it fallbacks to the archived path.

http://git-wip-us.apache.org/repos/asf/hbase/blob/682b8ab8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 35458a2..d18dada 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -119,11 +119,12 @@ import com.google.common.base.Preconditions;
  * File is made of data blocks followed by meta data blocks (if any), a fileinfo
  * block, data block index, meta data block index, and a fixed size trailer
  * which records the offsets at which file changes content type.
- * <pre>&lt;data blocks>&lt;meta blocks>&lt;fileinfo>&lt;data index>&lt;meta index>&lt;trailer></pre>
+ * <pre>&lt;data blocks&gt;&lt;meta blocks&gt;&lt;fileinfo&gt;
+ * &lt;data index&gt;&lt;meta index&gt;&lt;trailer&gt;</pre>
  * Each block has a bit of magic at its start.  Block are comprised of
  * key/values.  In data blocks, they are both byte arrays.  Metadata blocks are
  * a String key and a byte array value.  An empty file looks like this:
- * <pre>&lt;fileinfo>&lt;trailer></pre>.  That is, there are not data nor meta
+ * <pre>&lt;fileinfo&gt;&lt;trailer&gt;</pre>.  That is, there are not data nor meta
  * blocks present.
  * <p>
  * TODO: Do scanners need to be able to take a start and end row?