Posted to commits@hbase.apache.org by st...@apache.org on 2014/12/01 19:13:25 UTC

[2/2] hbase git commit: HBASE-12603 Remove javadoc warnings introduced due to removal of unused imports (Varun Saxena)

HBASE-12603 Remove javadoc warnings introduced due to removal of unused imports (Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56a03d73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56a03d73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56a03d73

Branch: refs/heads/master
Commit: 56a03d736a6fe21651074db0f46e7198d3dcc69b
Parents: b6b88ed
Author: stack <st...@apache.org>
Authored: Mon Dec 1 10:13:16 2014 -0800
Committer: stack <st...@apache.org>
Committed: Mon Dec 1 10:13:16 2014 -0800

----------------------------------------------------------------------
 .../classification/InterfaceStability.java      | 12 +++++++----
 .../hbase/client/ReversedScannerCallable.java   |  9 ++++++---
 .../hadoop/hbase/client/ScannerCallable.java    |  3 ++-
 .../hbase/coprocessor/ColumnInterpreter.java    |  8 +++++---
 .../hadoop/hbase/filter/QualifierFilter.java    |  3 ++-
 .../apache/hadoop/hbase/filter/RowFilter.java   |  3 ++-
 .../hbase/filter/SingleColumnValueFilter.java   |  3 ++-
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java |  4 ++--
 .../hbase/ipc/RemoteWithExtrasException.java    |  3 ++-
 .../hadoop/hbase/ipc/ServerRpcController.java   | 11 +++++-----
 .../org/apache/hadoop/hbase/util/PoolMap.java   |  2 +-
 .../org/apache/hadoop/hbase/codec/Codec.java    |  5 +++--
 .../hadoop/hbase/io/CellOutputStream.java       |  4 ++--
 .../org/apache/hadoop/hbase/types/DataType.java |  3 ++-
 .../hadoop/hbase/types/RawBytesFixedLength.java |  8 +++++---
 .../hadoop/hbase/types/RawBytesTerminated.java  |  8 ++++----
 .../hbase/types/RawStringFixedLength.java       |  9 +++++----
 .../hadoop/hbase/types/RawStringTerminated.java |  9 +++++----
 .../hbase/util/AbstractPositionedByteRange.java |  6 +++---
 .../hadoop/hbase/util/ConcurrentIndex.java      |  4 ++--
 .../hadoop/hbase/util/PositionedByteRange.java  |  2 +-
 .../example/ZooKeeperScanPolicyObserver.java    |  6 ++++--
 .../example/LongTermArchivingHFileCleaner.java  |  8 +++++---
 .../hbase/client/TableSnapshotScanner.java      |  5 +++--
 .../hbase/constraint/ConstraintException.java   | 10 ++++++----
 .../SplitLogManagerCoordination.java            |  4 ++--
 .../SplitLogWorkerCoordination.java             |  3 ++-
 .../ZKSplitLogManagerCoordination.java          | 15 ++++++++------
 .../ZkSplitLogWorkerCoordination.java           |  4 ++--
 .../hbase/coprocessor/RegionObserver.java       | 16 +++++++++------
 .../hadoop/hbase/io/hfile/HFileBlock.java       |  8 ++++----
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java  |  6 +++---
 .../hadoop/hbase/io/hfile/HFileScanner.java     |  2 +-
 .../hadoop/hbase/io/hfile/LruBlockCache.java    | 16 +++++++--------
 .../hbase/io/hfile/bucket/BucketCache.java      |  4 ++--
 .../apache/hadoop/hbase/mapred/RowCounter.java  |  2 +-
 .../hbase/mapred/TableInputFormatBase.java      |  8 +++++---
 .../hadoop/hbase/mapred/TableRecordReader.java  |  2 +-
 .../hbase/mapred/TableRecordReaderImpl.java     |  2 +-
 .../hadoop/hbase/mapreduce/HLogInputFormat.java |  3 ++-
 .../hbase/mapreduce/IdentityTableReducer.java   |  6 +++---
 .../hbase/mapreduce/TableRecordReaderImpl.java  |  2 +-
 .../mapreduce/TableSnapshotInputFormat.java     | 12 +++++------
 .../org/apache/hadoop/hbase/master/HMaster.java |  2 +-
 .../hadoop/hbase/master/SplitLogManager.java    |  5 +++--
 .../hbase/master/balancer/BaseLoadBalancer.java |  5 +++--
 .../balancer/FavoredNodeLoadBalancer.java       | 21 ++++++++++----------
 .../master/balancer/SimpleLoadBalancer.java     |  3 ++-
 .../hbase/master/handler/LogReplayHandler.java  |  7 ++++---
 .../hbase/master/handler/TotesHRegionInfo.java  |  2 +-
 .../master/snapshot/MasterSnapshotVerifier.java |  3 ++-
 .../hbase/procedure/MasterProcedureManager.java | 21 +++++++++++---------
 .../procedure/MasterProcedureManagerHost.java   |  4 ++--
 .../RegionServerProcedureManagerHost.java       |  3 ++-
 .../MiniBatchOperationInProgress.java           |  9 ++++++---
 .../hbase/regionserver/RegionScanner.java       |  3 ++-
 .../hbase/regionserver/SplitLogWorker.java      |  9 +++++----
 .../HBaseInterClusterReplicationEndpoint.java   |  3 ++-
 .../RegionReplicaReplicationEndpoint.java       |  5 +++--
 .../hadoop/hbase/util/BloomFilterFactory.java   |  2 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  2 +-
 .../hadoop/hbase/util/HFileArchiveUtil.java     |  6 ++++--
 .../hbase/zookeeper/ClusterStatusTracker.java   |  6 +++---
 .../hadoop/hbase/zookeeper/ZKSplitLog.java      |  3 ++-
 64 files changed, 223 insertions(+), 164 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
----------------------------------------------------------------------
diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
index d0d23b6..338b375 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
@@ -27,10 +27,14 @@ import java.lang.annotation.RetentionPolicy;
  * class or method not changing over time. Currently the stability can be
  * {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
  *
- * <ul><li>All classes that are annotated with {@link Public} or
- * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
- * <li>Classes that are {@link Private} are to be considered unstable unless
- * a different InterfaceStability annotation states otherwise.</li>
+ * <ul><li>All classes that are annotated with 
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.Public} or
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate} 
+ * must have InterfaceStability annotation. </li>
+ * <li>Classes that are 
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate} 
+ * are to be considered unstable unless a different InterfaceStability annotation
+ *  states otherwise.</li>
  * <li>Incompatible changes must not be made to classes marked as stable.</li>
  * </ul>
  */
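
For illustration, a minimal sketch of the convention this javadoc describes: an audience-annotated class pairing its audience annotation with a stability annotation (the class name is hypothetical):

  import org.apache.hadoop.hbase.classification.InterfaceAudience;
  import org.apache.hadoop.hbase.classification.InterfaceStability;

  @InterfaceAudience.Public        // exposed to all users...
  @InterfaceStability.Evolving     // ...but the API may still change between minor releases
  public class ExampleClientApi {  // hypothetical class, for illustration only
    // Classes annotated Public or LimitedPrivate must also carry one of
    // Stable, Evolving or Unstable.
  }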

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
index 417012a..346342e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java
@@ -54,7 +54,8 @@ public class ReversedScannerCallable extends ScannerCallable {
    * @param scan
    * @param scanMetrics
    * @param locateStartRow The start row for locating regions
-   * @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+   * @param rpcFactory to create an {@link com.google.protobuf.RpcController} 
+   * to talk to the regionserver
    */
   public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
       ScanMetrics scanMetrics, byte[] locateStartRow, RpcControllerFactory rpcFactory) {
@@ -68,7 +69,8 @@ public class ReversedScannerCallable extends ScannerCallable {
    * @param scan
    * @param scanMetrics
    * @param locateStartRow The start row for locating regions
-   * @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+   * @param rpcFactory to create an {@link com.google.protobuf.RpcController} 
+   *        to talk to the regionserver
    * @param replicaId the replica id
    */
   public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
@@ -79,7 +81,8 @@ public class ReversedScannerCallable extends ScannerCallable {
 
   /**
    * @deprecated use
-   *             {@link #ReversedScannerCallable(ClusterConnection, TableName, Scan, ScanMetrics, byte[], RpcControllerFactory )}
+   *  {@link #ReversedScannerCallable(ClusterConnection, TableName, Scan, 
+   *     ScanMetrics, byte[], RpcControllerFactory )}
    */
   @Deprecated
   public ReversedScannerCallable(ClusterConnection connection, TableName tableName,

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 7a6133a..0aecef2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -95,7 +95,8 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
    * @param scan the scan to execute
    * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect
    *          metrics
-   * @param rpcControllerFactory factory to use when creating {@link RpcController}
+   * @param rpcControllerFactory factory to use when creating 
+   *        {@link com.google.protobuf.RpcController}
    */
   public ScannerCallable (ClusterConnection connection, TableName tableName, Scan scan,
       ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
index 690a398..43efb66 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
@@ -30,8 +30,9 @@ import com.google.protobuf.Message;
  * Defines how value for specific column is interpreted and provides utility
  * methods like compare, add, multiply etc for them. Takes column family, column
  * qualifier and return the cell value. Its concrete implementation should
- * handle null case gracefully. Refer to {@link LongColumnInterpreter} for an
- * example.
+ * handle null case gracefully. 
+ * Refer to {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter} 
+ * for an example.
  * <p>
  * Takes two generic parameters and three Message parameters. 
  * The cell value type of the interpreter is <T>.
@@ -127,7 +128,8 @@ Q extends Message, R extends Message> {
    * server side to construct the ColumnInterpreter. The server
    * will pass this to the {@link #initialize}
    * method. If there is no ColumnInterpreter specific data (for e.g.,
-   * {@link LongColumnInterpreter}) then null should be returned.
+   * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter})
+   *  then null should be returned.
    * @return the PB message
    */
   public abstract P getRequestData();
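
For context, a hedged sketch of how a ColumnInterpreter is typically consumed: the AggregationClient shipped with HBase passes a LongColumnInterpreter to an aggregation endpoint. This assumes the AggregateImplementation coprocessor is deployed on the table; the table, family and qualifier names are hypothetical:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
  import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MaxExample {
    public static void main(String[] args) throws Throwable {
      Configuration conf = HBaseConfiguration.create();
      AggregationClient aggregationClient = new AggregationClient(conf);
      Scan scan = new Scan();
      scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // cells hold 8-byte longs
      // LongColumnInterpreter decodes each cell value as a long for the aggregation
      Long max = aggregationClient.max(TableName.valueOf("t1"),
          new LongColumnInterpreter(), scan);
      System.out.println("max = " + max);
    }
  }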

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
index 4856cc0..fb183f1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
@@ -41,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * <p>
  * Multiple filters can be combined using {@link FilterList}.
  * <p>
- * If an already known column qualifier is looked for, use {@link Get#addColumn}
+ * If an already known column qualifier is looked for, 
+ * use {@link org.apache.hadoop.hbase.client.Get#addColumn}
  * directly rather than a filter.
  */
 @InterfaceAudience.Public
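
A small sketch contrasting the two approaches the javadoc mentions; asking for a known qualifier directly avoids a server-side filter pass (row, family and qualifier names are hypothetical):

  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.filter.BinaryComparator;
  import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
  import org.apache.hadoop.hbase.filter.QualifierFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class KnownQualifierExample {
    static Get direct() {
      // Preferred: the qualifier is known, so name the column explicitly.
      Get get = new Get(Bytes.toBytes("row1"));
      get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
      return get;
    }

    static Get filtered() {
      // Works, but makes the server test every cell's qualifier instead.
      Get get = new Get(Bytes.toBytes("row1"));
      get.setFilter(new QualifierFilter(CompareOp.EQUAL,
          new BinaryComparator(Bytes.toBytes("q1"))));
      return get;
    }
  }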

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
index 9cc28f7..cb4337e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
@@ -40,7 +40,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * <p>
  * Multiple filters can be combined using {@link FilterList}.
  * <p>
- * If an already known row range needs to be scanned, use {@link Scan} start
+ * If an already known row range needs to be scanned, 
+ * use {@link org.apache.hadoop.hbase.CellScanner} start
  * and stop rows directly rather than a filter.
  */
 @InterfaceAudience.Public
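
Likewise for a known row range, the prose means the Scan start/stop rows; bounding the scan beats filtering every row (row keys are hypothetical):

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.util.Bytes;

  public class KnownRowRangeExample {
    static Scan bounded() {
      Scan scan = new Scan();
      scan.setStartRow(Bytes.toBytes("row-0100"));  // inclusive
      scan.setStopRow(Bytes.toBytes("row-0200"));   // exclusive
      return scan;
    }
  }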

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 897f029..d905868 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -52,7 +52,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * long value), then you can pass in your own comparator instead.
  * <p>
  * You must also specify a family and qualifier.  Only the value of this column
- * will be tested. When using this filter on a {@link Scan} with specified
+ * will be tested. When using this filter on a 
+ * {@link org.apache.hadoop.hbase.CellScanner} with specified
  * inputs, the column to be tested should also be added as input (otherwise
  * the filter will regard the column as missing).
  * <p>
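
A minimal sketch of the usage the javadoc describes: the tested column is named on the filter and also added to the scan's inputs (family, qualifier and value are hypothetical):

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
  import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ScvfExample {
    static Scan statusEqualsActive() {
      byte[] family = Bytes.toBytes("cf");
      byte[] qualifier = Bytes.toBytes("status");
      SingleColumnValueFilter filter = new SingleColumnValueFilter(
          family, qualifier, CompareOp.EQUAL, Bytes.toBytes("ACTIVE"));
      filter.setFilterIfMissing(true);    // otherwise rows lacking the column pass
      Scan scan = new Scan();
      scan.addColumn(family, qualifier);  // the tested column must be among the inputs
      scan.setFilter(filter);
      return scan;
    }
  }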

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
index f3d81f7..8ec1517 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
@@ -36,8 +36,8 @@ import com.google.protobuf.ServiceException;
 
 /**
  * Base class which provides clients with an RPC connection to
- * call coprocessor endpoint {@link Service}s.  Note that clients should not use this class
- * directly, except through
+ * call coprocessor endpoint {@link com.google.protobuf.Service}s.  
+ * Note that clients should not use this class directly, except through
  * {@link org.apache.hadoop.hbase.client.HTableInterface#coprocessorService(byte[])}.
  */
 @InterfaceAudience.Public
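
A hedged sketch of the one supported entry point the javadoc names, HTableInterface#coprocessorService(byte[]); MyService, MyRequest and MyResponse stand in for generated protobuf classes and are hypothetical:

  import com.google.protobuf.ServiceException;
  import org.apache.hadoop.hbase.client.HTableInterface;
  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
  import org.apache.hadoop.hbase.util.Bytes;

  public class EndpointClientExample {
    static MyResponse call(HTableInterface table) throws ServiceException {
      // Channel to the region hosting "row1" (hypothetical row key)
      CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row1"));
      MyService.BlockingInterface stub = MyService.newBlockingStub(channel);
      return stub.myMethod(null, MyRequest.getDefaultInstance());
    }
  }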

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
index 01661ed..46356f8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
@@ -23,7 +23,8 @@ import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * A {@link RemoteException} with some extra information.  If source exception
- * was a {@link DoNotRetryIOException}, {@link #isDoNotRetry()} will return true.
+ * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException}, 
+ * {@link #isDoNotRetry()} will return true.
  * <p>A {@link RemoteException} hosts exceptions we got from the server.
  */
 @SuppressWarnings("serial")

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
index b9b31f9..5511cb1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
@@ -29,11 +29,11 @@ import com.google.protobuf.RpcController;
 /**
  * Used for server-side protobuf RPC service invocations.  This handler allows
  * invocation exceptions to easily be passed through to the RPC server from coprocessor
- * {@link Service} implementations.
+ * {@link com.google.protobuf.Service} implementations.
  *
  * <p>
- * When implementing {@link Service} defined methods, coprocessor endpoints can use the following
- * pattern to pass exceptions back to the RPC client:
+ * When implementing {@link com.google.protobuf.Service} defined methods, 
+ * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client:
  * <code>
  * public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done) {
  *   MyResponse response = null;
@@ -53,7 +53,8 @@ import com.google.protobuf.RpcController;
 public class ServerRpcController implements RpcController {
   /**
    * The exception thrown within
-   * {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
+   * {@link com.google.protobuf.Service#callMethod(
+   *   Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
    * if any.
    */
   // TODO: it would be good widen this to just Throwable, but IOException is what we allow now
@@ -97,7 +98,7 @@ public class ServerRpcController implements RpcController {
   }
 
   /**
-   * Sets an exception to be communicated back to the {@link Service} client.
+   * Sets an exception to be communicated back to the {@link com.google.protobuf.Service} client.
    * @param ioe the exception encountered during execution of the service method
    */
   public void setFailedOn(IOException ioe) {
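
The <code> block in the javadoc above is cut off by the hunk boundary; for reference, the complete pattern it sketches looks roughly like this (process(), MyRequest and MyResponse are hypothetical):

  import java.io.IOException;
  import com.google.protobuf.RpcCallback;
  import com.google.protobuf.RpcController;
  import org.apache.hadoop.hbase.protobuf.ResponseConverter;

  public void myMethod(RpcController controller, MyRequest request,
      RpcCallback<MyResponse> done) {
    MyResponse response = null;
    try {
      response = process(request);  // service-specific work
    } catch (IOException ioe) {
      // Hand the exception to the controller so the RPC layer returns it to the client
      ResponseConverter.setControllerException(controller, ioe);
    }
    done.run(response);
  }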

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
index 04041a8..9d0319b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
@@ -281,7 +281,7 @@ public class PoolMap<K, V> implements Map<K, V> {
 
   /**
    * The <code>ReusablePool</code> represents a {@link PoolMap.Pool} that builds
-   * on the {@link LinkedList} class. It essentially allows resources to be
+   * on the {@link java.util.LinkedList} class. It essentially allows resources to be
    * checked out, at which point it is removed from this pool. When the resource
    * is no longer required, it should be returned to the pool in order to be
    * reused.

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
index c924506..de44ec6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
@@ -28,13 +28,14 @@ import org.apache.hadoop.hbase.io.CellOutputStream;
 /**
  * Encoder/Decoder for Cell.
  *
- * <p>Like {@link DataBlockEncoder} only Cell-based rather than KeyValue version 1 based
+ * <p>Like {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder} 
+ * only Cell-based rather than KeyValue version 1 based
  * and without presuming an hfile context.  Intent is an Interface that will work for hfile and
  * rpc.
  */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 public interface Codec {
-  // TODO: interfacing with {@link DataBlockEncoder}
+  // TODO: interfacing with {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
   /**
    * Call flush when done.  Some encoders may not put anything on the stream until flush is called.
    * On flush, let go of any resources used by the encoder.
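
A minimal round-trip sketch of the Codec contract described above, using the KeyValueCodec implementation; note the flush before reading, per the "Call flush when done" requirement:

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.IOException;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.codec.Codec;
  import org.apache.hadoop.hbase.codec.KeyValueCodec;

  public class CodecRoundTrip {
    static void roundTrip(Cell cell) throws IOException {
      Codec codec = new KeyValueCodec();
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      Codec.Encoder encoder = codec.getEncoder(out);
      encoder.write(cell);
      encoder.flush();  // some encoders emit nothing until flush
      Codec.Decoder decoder =
          codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
      while (decoder.advance()) {  // a Decoder doubles as a CellScanner
        Cell read = decoder.current();
        // use read...
      }
    }
  }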

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 3c69d98..34f1bf7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * and flushes, or to build a byte[] to send to the client. This could be backed by a
  * List<KeyValue>, but more efficient implementations will append results to a
  * byte[] to eliminate overhead, and possibly encode the cells further.
- * <p>To read Cells, use {@link CellScanner}
- * @see CellScanner
+ * <p>To read Cells, use {@link org.apache.hadoop.hbase.CellScanner}
+ * @see org.apache.hadoop.hbase.CellScanner
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
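
And the reading side the javadoc points at: any CellScanner (a Result, for instance) is consumed with the same advance()/current() loop:

  import java.io.IOException;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellScanner;
  import org.apache.hadoop.hbase.client.Result;

  public class CellScanExample {
    static void dump(Result result) throws IOException {
      CellScanner scanner = result.cellScanner();  // Result exposes its cells this way
      while (scanner.advance()) {
        Cell cell = scanner.current();
        // inspect cell...
      }
    }
  }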

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java
index da434d2..2f98ebf 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * qualifiers.
  * </p>
  * <p>
- * {@code DataType}s are different from Hadoop {@link Writable}s in two
+ * {@code DataType}s are different from Hadoop 
+ * {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable}s in two
  * significant ways. First, {@code DataType} describes how to serialize a
  * value, it does not encapsulate a serialized value. Second, {@code DataType}
  * implementations provide hints to consumers about relationships between the

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java
index 90697db..bfd6416 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java
@@ -24,9 +24,11 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
 
 /**
  * An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Intended to make it
- * easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(
+ * byte[], int, byte[], int, int)}. Intended to make it
+ * easier to transition away from direct use of 
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
  * @see RawBytes
  * @see OrderedBlob
  * @see OrderedBlobVar

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java
index 292318b..8bc4c20 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java
@@ -24,10 +24,10 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
 
 /**
  * An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Includes a
- * termination marker following the raw {@code byte[]} value. Intended to
- * make it easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}. 
+ * Includes a termination marker following the raw {@code byte[]} value. Intended to make it easier 
+ * to transition away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
  * @see RawBytes
  * @see OrderedBlob
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java
index c5774c0..d11bead 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java
@@ -23,10 +23,11 @@ import org.apache.hadoop.hbase.util.Order;
 
 /**
  * An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#toBytes(String)}. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}. 
+ * Intended to make it easier to transition away from direct use of 
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
  * @see RawString
  */
 @InterfaceAudience.Public

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java
index a954ec3..4d89d5b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java
@@ -23,11 +23,12 @@ import org.apache.hadoop.hbase.util.Order;
 
 /**
  * An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#toBytes(String)}. Includes a termination marker following the
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}. 
+ * Includes a termination marker following the
  * raw {@code byte[]} value. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
  * @see RawString
  * @see OrderedString
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index bc09988..8d3d0cf 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -35,7 +35,7 @@ import com.google.common.annotations.VisibleForTesting;
 public abstract class AbstractPositionedByteRange extends AbstractByteRange implements
     PositionedByteRange {
   /**
-   * The current index into the range. Like {@link ByteBuffer} position, it
+   * The current index into the range. Like {@link java.nio.ByteBuffer} position, it
    * points to the next value that will be read/written in the array. It
    * provides the appearance of being 0-indexed, even though its value is
    * calculated according to offset.
@@ -182,7 +182,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
   @Override
   public abstract int putVLong(long val);
   /**
-   * Similar to {@link ByteBuffer#flip()}. Sets length to position, position to
+   * Similar to {@link java.nio.ByteBuffer#flip()}. Sets length to position, position to
    * offset.
    */
   @VisibleForTesting
@@ -194,7 +194,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
   }
 
   /**
-   * Similar to {@link ByteBuffer#clear()}. Sets position to 0, length to
+   * Similar to {@link java.nio.ByteBuffer#clear()}. Sets position to 0, length to
    * capacity.
    */
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java
index 46e583a..3b4a1f1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java
@@ -33,7 +33,7 @@ import com.google.common.base.Supplier;
 
 /**
  * A simple concurrent map of sets. This is similar in concept to
- * {@link Multiset}, with the following exceptions:
+ * {@link com.google.common.collect.Multiset}, with the following exceptions:
  * <ul>
  *   <li>The set is thread-safe and concurrent: no external locking or
  *   synchronization is required. This is important for the use case where
@@ -109,7 +109,7 @@ public class ConcurrentIndex<K, V> {
    * associated. <b>Note:</b> if the caller wishes to add or removes values
    * to under the specified as they're iterating through the returned value,
    * they should make a defensive copy; otherwise, a
-   * {@link ConcurrentModificationException} may be thrown.
+   * {@link java.util.ConcurrentModificationException} may be thrown.
    * @param key The key
    * @return All values associated with the specified key or null if no values
    *         are associated with the key.

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
index e7bd17e..3c47d86 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * their own index into the array.
  * </p>
  * <p>
- * Designed to be a slimmed-down, mutable alternative to {@link ByteBuffer}.
+ * Designed to be a slimmed-down, mutable alternative to {@link java.nio.ByteBuffer}.
  * </p>
  */
 @InterfaceAudience.Public

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
index 79a8ff8..a65b27d 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
@@ -51,7 +51,8 @@ import org.apache.zookeeper.ZooKeeper;
  * This is an example showing how a RegionObserver could configured
  * via ZooKeeper in order to control a Region compaction, flush, and scan policy.
  *
- * This also demonstrated the use of shared {@link RegionObserver} state.
+ * This also demonstrated the use of shared 
+ * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state.
  * See {@link RegionCoprocessorEnvironment#getSharedData()}.
  *
  * This would be useful for an incremental backup tool, which would indicate the last
@@ -59,7 +60,8 @@ import org.apache.zookeeper.ZooKeeper;
  * inserted since (based on wall clock time). 
  *
  * This implements org.apache.zookeeper.Watcher directly instead of using
- * {@link ZooKeeperWatcher}, because RegionObservers come and go and currently
+ * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher}, 
+ * because RegionObservers come and go and currently
  * listeners registered with ZooKeeperWatcher cannot be removed.
  */
 public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
index 6929f01..09a6659 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
@@ -35,9 +35,11 @@ import org.apache.zookeeper.KeeperException;
  * {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
  * currently being archived.
  * <p>
- * This only works properly if the {@link TimeToLiveHFileCleaner} is also enabled (it always should
- * be), since it may take a little time for the ZK notification to propagate, in which case we may
- * accidentally delete some files.
+ * This only works properly if the 
+ * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
+ *  is also enabled (it always should be), since it may take a little time
+ *  for the ZK notification to propagate, in which case we may accidentally
+ *  delete some files.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index 2ffdf70..baf2aa6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -49,7 +49,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * <p>
  * This also allows one to run the scan from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this scanner can be used to
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, 
+ * to a pure-hdfs cluster, and this scanner can be used to 
  * run the scan directly over the snapshot files. The snapshot should not be deleted while there
  * are open scanners reading from snapshot files.
  *
@@ -65,7 +66,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * snapshot files, the job has to be run as the HBase user or the user must have group or other
  * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from
  * snapshot/data files will completely circumvent the access control enforced by HBase.
- * @see TableSnapshotInputFormat
+ * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
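
A hedged usage sketch matching the javadoc; the snapshot name and restore directory are hypothetical, and the restore directory should be a writable scratch location on the same filesystem as the hbase root dir:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.TableSnapshotScanner;

  public class SnapshotScanExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      Path restoreDir = new Path("/tmp/snapshot-restore");
      TableSnapshotScanner scanner =
          new TableSnapshotScanner(conf, restoreDir, "my_snapshot", new Scan());
      try {
        for (Result result : scanner) {  // reads snapshot files directly, no region servers
          // process result...
        }
      } finally {
        scanner.close();
      }
    }
  }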

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
index 49a7a4b..31746b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.constraint;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
- * Exception that a user defined constraint throws on failure of a {@link Put}.
- * <p>
- * Does <b>NOT</b> attempt the {@link Put} multiple times, since the constraint
- * <it>should</it> fail every time for the same {@link Put} (it should be
+ * Exception that a user defined constraint throws on failure of a
+ *  {@link org.apache.hadoop.hbase.client.Put}.
+ * <p>Does <b>NOT</b> attempt the
+ *  {@link org.apache.hadoop.hbase.client.Put} multiple times, 
+ *  since the constraint <it>should</it> fail every time for 
+ *  the same {@link org.apache.hadoop.hbase.client.Put} (it should be
  * idempotent).
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
index 0c7563f..917df5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java
@@ -42,11 +42,11 @@ import com.google.common.annotations.VisibleForTesting;
  * <P>
  * Methods required for task life circle: <BR>
  * {@link #markRegionsRecovering(ServerName, Set)} mark regions for log replaying. Used by
- * {@link MasterFileSystem} <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} <BR>
  * {@link #removeRecoveringRegions(Set, Boolean)} make regions cleanup that previous were marked as
  * recovering. Called after all tasks processed <BR>
  * {@link #removeStaleRecoveringRegions(Set)} remove stale recovering. called by
- * {@link MasterFileSystem} after Active Master is initialized <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} after Active Master is initialized <BR>
  * {@link #getLastRecoveryTime()} required for garbage collector and should indicate when the last
  * recovery has been made<BR>
  * {@link #checkTaskStillAvailable(String)} Check that task is still there <BR>

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
index a925574..707850d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
@@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
 import com.google.common.annotations.VisibleForTesting;
 
 /**
- * Coordinated operations for {@link SplitLogWorker} and {@link WALSplitterHandler} Important
+ * Coordinated operations for {@link SplitLogWorker} and 
+ * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important
  * methods for SplitLogWorker: <BR>
  * {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is
  * ready to supply the tasks <BR>

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 411b7f9..4f511f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -67,7 +67,8 @@ import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.data.Stat;
 
 /**
- * ZooKeeper based implementation of {@link SplitLogManagerCoordination}
+ * ZooKeeper based implementation of
+ *  {@link org.apache.hadoop.hbase.master.SplitLogManagerCoordination}
  */
 @InterfaceAudience.Private
 public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
@@ -682,7 +683,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 
   /**
    * ZooKeeper implementation of
-   * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
+   * {@link org.apache.hadoop.hbase.master.
+   * SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
    */
   @Override
   public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)
@@ -904,9 +906,10 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 
 
   /**
-   * {@link SplitLogManager} can use objects implementing this interface to finish off a partially
-   * done task by {@link SplitLogWorker}. This provides a serialization point at the end of the task
-   * processing. Must be restartable and idempotent.
+   * {@link org.apache.hadoop.hbase.master.SplitLogManager} can use objects implementing this 
+   * interface to finish off a partially done task by 
+   * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a 
+   * serialization point at the end of the task processing. Must be restartable and idempotent.
    */
   public interface TaskFinisher {
     /**
@@ -1067,7 +1070,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
    * Asynchronous handler for zk create RESCAN-node results. Retries on failures.
    * <p>
    * A RESCAN node is created using PERSISTENT_SEQUENTIAL flag. It is a signal for all the
-   * {@link SplitLogWorker}s to rescan for new tasks.
+   * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}s to rescan for new tasks.
    */
   public class CreateRescanAsyncCallback implements AsyncCallback.StringCallback {
     private final Log LOG = LogFactory.getLog(CreateRescanAsyncCallback.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 3dcde66..9ea6bd7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -583,8 +583,8 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
    * Next part is related to WALSplitterHandler
    */
   /**
-   * endTask() can fail and the only way to recover out of it is for the {@link SplitLogManager} to
-   * timeout the task node.
+   * endTask() can fail and the only way to recover out of it is for the 
+   * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
    * @param slt
    * @param ctr
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 21c01ec..9fede52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -212,8 +212,9 @@ public interface RegionObserver extends Coprocessor {
    * options:
    * <ul>
    * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
-   * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
-   * scanner, applying its own policy to what gets written.</li>
+   * from this method. The custom scanner can then inspect
+   *  {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped scanner, applying its own
+   *   policy to what gets written.</li>
    * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
    * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
    * bypassing core compaction using this approach must write out new store files themselves or the
@@ -238,8 +239,9 @@ public interface RegionObserver extends Coprocessor {
    * options:
    * <ul>
    * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
-   * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
-   * scanner, applying its own policy to what gets written.</li>
+   * from this method. The custom scanner can then inspect
+   *  {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped scanner, applying its own
+   *   policy to what gets written.</li>
    * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
    * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
    * bypassing core compaction using this approach must write out new store files themselves or the
@@ -269,7 +271,8 @@ public interface RegionObserver extends Coprocessor {
    * effect in this hook.
    * @param c the environment provided by the region server
    * @param store the store being compacted
-   * @param scanners the list {@link StoreFileScanner}s to be read from
+   * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+   *  to be read from
    * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
    * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
    *          files
@@ -293,7 +296,8 @@ public interface RegionObserver extends Coprocessor {
    * effect in this hook.
    * @param c the environment provided by the region server
    * @param store the store being compacted
-   * @param scanners the list {@link StoreFileScanner}s to be read from
+   * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+   *  to be read from
    * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
    * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
    *          files
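
A hedged sketch of the wrapping option described above for the preCompact hook; the delegating scanner is hypothetical and would apply its own policy to what gets written:

  import java.io.IOException;
  import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.regionserver.InternalScanner;
  import org.apache.hadoop.hbase.regionserver.ScanType;
  import org.apache.hadoop.hbase.regionserver.Store;

  public class CompactionPolicyObserver extends BaseRegionObserver {
    @Override
    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, InternalScanner scanner, ScanType scanType) throws IOException {
      // Option 1 from the javadoc: return a wrapper that inspects the cells the
      // wrapped scanner produces and decides what survives the compaction.
      return new DelegatingInternalScanner(scanner);  // hypothetical wrapper class
    }
  }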

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index ef74844..667873b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -339,9 +339,9 @@ public class HFileBlock implements Cacheable {
   /**
    * Returns the buffer this block stores internally. The clients must not
    * modify the buffer object. This method has to be public because it is
-   * used in {@link CompoundBloomFilter} to avoid object creation on every
-   * Bloom filter lookup, but has to be used with caution. Checksum data
-   * is not included in the returned buffer but header data is.
+   * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter} to avoid object
+   *  creation on every Bloom filter lookup, but has to be used with caution.
+   *   Checksum data is not included in the returned buffer but header data is.
    *
    * @return the buffer of this block for read-only operations
    */
@@ -354,7 +354,7 @@ public class HFileBlock implements Cacheable {
   /**
    * Returns the buffer of this block, including header data. The clients must
    * not modify the buffer object. This method has to be public because it is
-   * used in {@link BucketCache} to avoid buffer copy.
+   * used in {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} to avoid buffer copy.
    *
    * @return the buffer with header and checksum included for read-only operations
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 57c2dfa..9413364 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -54,9 +54,9 @@ import org.apache.hadoop.util.StringUtils;
  * ({@link BlockIndexReader}) single-level and multi-level block indexes.
  *
  * Examples of how to use the block index writer can be found in
- * {@link CompoundBloomFilterWriter} and {@link HFileWriterV2}. Examples of how
- * to use the reader can be found in {@link HFileReaderV2} and
- * TestHFileBlockIndex.
+ * {@link org.apache.hadoop.hbase.util.CompoundBloomFilterWriter} and
+ *  {@link HFileWriterV2}. Examples of how to use the reader can be
+ *  found in {@link HFileReaderV2} and TestHFileBlockIndex.
  */
 @InterfaceAudience.Private
 public class HFileBlockIndex {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
index 99da135..1ad91e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
@@ -131,7 +131,7 @@ public interface HFileScanner {
    */
   ByteBuffer getValue();
   /**
-   * @return Instance of {@link KeyValue}.
+   * @return Instance of {@link org.apache.hadoop.hbase.KeyValue}.
    */
   Cell getKeyValue();
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 4f40d0a..82df5f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -57,16 +57,16 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving
  * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
  *
- * Contains three levels of block priority to allow for
- * scan-resistance and in-memory families {@link HColumnDescriptor#setInMemory(boolean)} (An
- * in-memory column family is a column family that should be served from memory if possible):
+ * Contains three levels of block priority to allow for scan-resistance and in-memory families 
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column 
+ * family is a column family that should be served from memory if possible):
  * single-access, multiple-accesses, and in-memory priority.
  * A block is added with an in-memory priority flag if
- * {@link HColumnDescriptor#isInMemory()}, otherwise a block becomes a single access
- * priority the first time it is read into this block cache.  If a block is accessed again while
- * in cache, it is marked as a multiple access priority block.  This delineation of blocks is used
- * to prevent scans from thrashing the cache adding a least-frequently-used
- * element to the eviction algorithm.<p>
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()}, otherwise a block becomes a
+ * single access priority the first time it is read into this block cache.  If a block is
+ * accessed again while in cache, it is marked as a multiple access priority block.  This
+ * delineation of blocks is used to prevent scans from thrashing the cache by adding a
+ * least-frequently-used element to the eviction algorithm.<p>
  *
  * Each priority is given its own chunk of the total cache to ensure
  * fairness during eviction.  Each priority will retain close to its maximum

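Note: the in-memory priority described above is driven entirely by the column family
descriptor. A minimal sketch, assuming an existing Admin instance named admin (table
and family names are illustrative):

  HColumnDescriptor family = new HColumnDescriptor("cf");
  family.setInMemory(true);   // blocks of this family get in-memory priority in the cache
  HTableDescriptor table = new HTableDescriptor(TableName.valueOf("t"));
  table.addFamily(family);
  admin.createTable(table);
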
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index f1310a9..d3b303a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -82,8 +82,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
  *
  * <p>BucketCache can be used mainly as a block cache (see
- * {@link CombinedBlockCache}), combined with LruBlockCache to decrease CMS GC and
- * heap fragmentation.
+ * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with 
+ * LruBlockCache to decrease CMS GC and heap fragmentation.
  *
  * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
  * blocks) to enlarge cache space via

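Note: BucketCache is enabled purely through configuration. A minimal sketch of the
combined-cache setup described above (the capacity value is illustrative):

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.bucketcache.ioengine", "offheap");  // or "file:/ssd/bucketcache" for a file-backed cache
  conf.setInt("hbase.bucketcache.size", 4096);        // capacity in MB (values < 1.0 mean a heap fraction)
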
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
index b75e4a6..fd9a60c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.ToolRunner;
 /**
  * A job with a map to count rows.
  * Map outputs table rows IF the input row has columns that have content.
- * Uses an {@link IdentityReducer}
+ * Uses an org.apache.hadoop.mapred.lib.IdentityReducer
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index d2d754a..fbfd984 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -103,9 +103,11 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    * Calculates the splits that will serve as input for the map tasks.
    * <ul>
    * Splits are created in number equal to the smallest between numSplits and
-   * the number of {@link HRegion}s in the table. If the number of splits is
-   * smaller than the number of {@link HRegion}s then splits are spanned across
-   * multiple {@link HRegion}s and are grouped the most evenly possible. In the
+   * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. 
+   * If the number of splits is smaller than the number of 
+   * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
+   * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s 
+   * and are grouped the most evenly possible. In the
    * case splits are uneven the bigger splits are placed first in the
    * {@link InputSplit} array.
    *

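Note: the split rule in this javadoc boils down to a single min(); a sketch of the
rule (variable names are illustrative):

  // the number of map splits never exceeds the table's region count
  int splitCount = Math.min(numSplits, regionCount);
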
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
index f0944b6..281d13e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
@@ -59,7 +59,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
   }
 
   /**
-   * @param htable the {@link HTable} to scan.
+   * @param htable the {@link org.apache.hadoop.hbase.client.Table} to scan.
    */
   public void setHTable(Table htable) {
     this.recordReaderImpl.setHTable(htable);

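Note: a minimal sketch of handing a Table to the reader; obtaining it via
ConnectionFactory is illustrative of the 1.0-era client API:

  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf("t"));
  TableRecordReader reader = new TableRecordReader();
  reader.setHTable(table);
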
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
index 1f6ed0d..c577c54 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
@@ -113,7 +113,7 @@ public class TableRecordReaderImpl {
     return this.startRow;
   }
   /**
-   * @param htable the {@link HTable} to scan.
+   * @param htable the {@link org.apache.hadoop.hbase.client.Table} to scan.
    */
   public void setHTable(Table htable) {
     Configuration conf = htable.getConfiguration();

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
index 96b42cd..763d802 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
 /**
- * Simple {@link InputFormat} for {@link WAL} files.
+ * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL} 
+ * files.
  * @deprecated use {@link WALInputFormat}
  */
 @Deprecated

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
index 0160d21..ec3192e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
@@ -59,9 +59,9 @@ extends TableReducer<Writable, Mutation, Writable> {
 
   /**
    * Writes each given record, consisting of the row key and the given values,
-   * to the configured {@link OutputFormat}. It is emitting the row key and each
-   * {@link org.apache.hadoop.hbase.client.Put Put} or
-   * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
+   * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}.
+   * It emits the row key and each {@link org.apache.hadoop.hbase.client.Put Put}
+   * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
    *
    * @param key  The current row key.
    * @param values  The {@link org.apache.hadoop.hbase.client.Put Put} or

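Note: a minimal sketch of wiring this reducer into a job so mapper-emitted Puts and
Deletes are written straight through (the table name is illustrative):

  Job job = Job.getInstance(conf, "write-through");
  TableMapReduceUtil.initTableReducerJob("target_table", IdentityTableReducer.class, job);
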
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index 9cb3de6..47f6869 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -119,7 +119,7 @@ public class TableRecordReaderImpl {
   /**
    * Sets the HBase table.
    *
-   * @param htable  The {@link HTable} to scan.
+   * @param htable  The {@link org.apache.hadoop.hbase.client.Table} to scan.
    */
   public void setHTable(Table htable) {
     Configuration conf = htable.getConfiguration();

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
index 7aff03a..44d88c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
@@ -49,9 +49,9 @@ import com.google.common.annotations.VisibleForTesting;
  * wals, etc) directly to provide maximum performance. The snapshot is not required to be
  * restored to the live cluster or cloned. This also allows running the mapreduce job from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this InputFormat can be used to
- * run the mapreduce job directly over the snapshot files. The snapshot should not be deleted
- * while there are jobs reading from snapshot files.
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, 
+ * and this InputFormat can be used to run the mapreduce job directly over the snapshot files. 
+ * The snapshot should not be deleted while there are jobs reading from snapshot files.
  * <p>
  * Usage is similar to TableInputFormat, and
  * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
@@ -68,8 +68,8 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>
  * Internally, this input format restores the snapshot into the given tmp directory. Similar to
  * {@link TableInputFormat}, an InputSplit is created per region. The region is opened for reading
- * from each RecordReader. An internal RegionScanner is used to execute the {@link Scan} obtained
- * from the user.
+ * from each RecordReader. An internal RegionScanner is used to execute the 
+ * {@link org.apache.hadoop.hbase.client.Scan} obtained from the user.
  * <p>
  * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from
  * snapshot files and data files.
@@ -79,7 +79,7 @@ import com.google.common.annotations.VisibleForTesting;
  * user or the user must have group or other privileges in the filesystem (See HBASE-8369).
  * Note that, given other users access to read from snapshot/data files will completely circumvent
  * the access control enforced by HBase.
- * @see TableSnapshotScanner
+ * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving

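Note: a minimal sketch of the initTableSnapshotMapperJob usage named above; the
snapshot name, mapper class, and restore directory are illustrative:

  Job job = Job.getInstance(conf, "snapshot-scan");
  TableMapReduceUtil.initTableSnapshotMapperJob(
      "my_snapshot",                         // snapshot to read
      new Scan(),                            // scan executed over the snapshot files
      MyMapper.class,                        // illustrative mapper
      Text.class, Text.class,                // output key/value classes
      job,
      true,                                  // add HBase dependency jars
      new Path("/tmp/snapshot_restore"));    // tmp dir the snapshot is restored into
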
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 6c073d7..e5d63b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -150,7 +150,7 @@ import com.google.protobuf.Service;
  *
  * <p>You can also shut down just this master.  Call {@link #stopMaster()}.
  *
- * @see Watcher
+ * @see org.apache.zookeeper.Watcher
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 11ac7c6..bc798cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -74,8 +74,9 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>SplitLogManager monitors the tasks that it creates using the
  * timeoutMonitor thread. If a task's progress is slow then
  * {@link SplitLogManagerCoordination#checkTasks} will take away the
- * task from the owner {@link SplitLogWorker} and the task will be up for grabs again. When the
- * task is done then it is deleted by SplitLogManager.
+ * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} 
+ * and the task will be up for grabs again. When the task is done then it is 
+ * deleted by SplitLogManager.
  *
  * <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
  * log files. The caller thread waits in this method until all the log files

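Note: the splitLogDistributed(Path) entry point mentioned above is a blocking call; a
one-line sketch, assuming a SplitLogManager instance and the dead server's WAL
directory (names illustrative):

  long bytesProcessed = splitLogManager.splitLogDistributed(logDir);  // blocks until all logs are split
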
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 6d7ed31..1385444 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -60,8 +60,9 @@ import com.google.common.collect.Sets;
 
 /**
  * The base class for load balancers. It provides the functions used by
- * {@link AssignmentManager} to assign regions in the edge cases. It doesn't
- * provide an implementation of the actual balancing algorithm.
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions 
+ * in the edge cases. It doesn't provide an implementation of the 
+ * actual balancing algorithm.
  *
  */
 public abstract class BaseLoadBalancer implements LoadBalancer {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
index c1209c8..6db82a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
@@ -41,16 +41,17 @@ import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
- * An implementation of the {@link LoadBalancer} that assigns favored nodes for
- * each region. There is a Primary RegionServer that hosts the region, and then
- * there is Secondary and Tertiary RegionServers. Currently, the favored nodes
- * information is used in creating HDFS files - the Primary RegionServer passes
- * the primary, secondary, tertiary node addresses as hints to the DistributedFileSystem
- * API for creating files on the filesystem. These nodes are treated as hints by
- * the HDFS to place the blocks of the file. This alleviates the problem to do with
- * reading from remote nodes (since we can make the Secondary RegionServer as the new
- * Primary RegionServer) after a region is recovered. This should help provide consistent
- * read latencies for the regions even when their primary region servers die.
+ * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that
+ * assigns favored nodes for each region. There is a Primary RegionServer that hosts
+ * the region, and then there are Secondary and Tertiary RegionServers. Currently, the
+ * favored nodes information is used in creating HDFS files - the Primary RegionServer
+ * passes the primary, secondary, and tertiary node addresses as hints to the
+ * DistributedFileSystem API for creating files on the filesystem. These nodes are
+ * treated as hints by HDFS when placing the blocks of the file. This alleviates the
+ * problem of reading from remote nodes (since the Secondary RegionServer can be made
+ * the new Primary RegionServer) after a region is recovered. This should help
+ * provide consistent read latencies for the regions even when their
+ * primary region servers die.
  *
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)

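Note: the balancer implementation is selected via configuration; a minimal sketch,
assuming the standard "hbase.master.loadbalancer.class" key:

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.master.loadbalancer.class",
      "org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer");
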
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index 164c418..9673acf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -51,7 +51,8 @@ import com.google.common.collect.MinMaxPriorityQueue;
  * <p>On cluster startup, bulk assignment can be used to determine
  * locations for all Regions in a cluster.
  *
- * <p>This classes produces plans for the {@link AssignmentManager} to execute.
+ * <p>This class produces plans for the
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to execute.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class SimpleLoadBalancer extends BaseLoadBalancer {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java
index 0655f1f..008a04e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java
@@ -33,9 +33,10 @@ import org.apache.hadoop.hbase.master.MasterServices;
 /**
  * Handles logReplay work from SSH. A separate handler is used so as not to block SSH in
  * re-assigning regions from dead servers. Otherwise, available SSH handlers could be blocked by logReplay work
- * (from {@link MasterFileSystem#splitLog(ServerName)}). During logReplay, if a receiving RS(say A)
- * fails again, regions on A won't be able to be assigned to another live RS which causes the log
- * replay unable to complete because WAL edits replay depends on receiving RS to be live
+ * (from {@link org.apache.hadoop.hbase.master.MasterFileSystem#splitLog(ServerName)}). 
+ * During logReplay, if a receiving RS (say A) fails again, regions on A cannot be
+ * reassigned to another live RS, which leaves the log replay unable to complete
+ * because WAL edit replay depends on the receiving RS staying live.
  */
 @InterfaceAudience.Private
 public class LogReplayHandler extends EventHandler {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
index c181e27..1a30de4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 
 /**
  * Implementors tote an HRegionInfo instance.
- * This is a marker interface that can be put on {@link EventHandler}s that
+ * This is a marker interface that can be put on
+ * {@link org.apache.hadoop.hbase.executor.EventHandler}s that
  * have an {@link HRegionInfo}.
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index df0b94b..b21f4e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -98,7 +98,8 @@ public final class MasterSnapshotVerifier {
   /**
    * Verify that the snapshot in the directory is a valid snapshot
    * @param snapshotDir snapshot directory to check
-   * @param snapshotServers {@link ServerName} of the servers that are involved in the snapshot
+   * @param snapshotServers {@link org.apache.hadoop.hbase.ServerName} of the servers 
+   *        that are involved in the snapshot
    * @throws CorruptedSnapshotException if the snapshot is invalid
    * @throws IOException if there is an unexpected connection issue to the filesystem
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
index 98f9b11..8f866f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
@@ -34,20 +34,23 @@ import org.apache.zookeeper.KeeperException;
 *
 * To implement a custom globally barriered procedure, a user needs to extend two classes:
 * {@link MasterProcedureManager} and {@link RegionServerProcedureManager}. Implementation of
-* {@link MasterProcedureManager} is loaded into {@link HMaster} process via configuration
-* parameter 'hbase.procedure.master.classes', while implementation of
-* {@link RegionServerProcedureManager} is loaded into {@link HRegionServer} process via
+* {@link MasterProcedureManager} is loaded into {@link org.apache.hadoop.hbase.master.HMaster} 
+* process via configuration parameter 'hbase.procedure.master.classes', while implementation of
+* {@link RegionServerProcedureManager} is loaded into 
+* {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process via
 * configuration parameter 'hbase.procedure.regionserver.classes'.
 *
-* An example of globally barriered procedure implementation is {@link SnapshotManager} and
-* {@link RegionServerSnapshotManager}.
+* An example of globally barriered procedure implementation is 
+* {@link org.apache.hadoop.hbase.master.snapshot.SnapshotManager} and
+* {@link org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager}.
 *
 * A globally barriered procedure is identified by its signature (usually it is the name of the
 * procedure znode). During the initialization phase, the initialize methods are called by both
-* {@link HMaster} and {@link HRegionServer} witch create the procedure znode and register the
-* listeners. A procedure can be triggered by its signature and an instant name (encapsulated in
-* a {@link ProcedureDescription} object). When the servers are shutdown, the stop methods on both
-* classes are called to clean up the data associated with the procedure.
+* {@link org.apache.hadoop.hbase.master.HMaster} 
+* and {@link org.apache.hadoop.hbase.regionserver.HRegionServer} which create the procedure znode 
+* and register the listeners. A procedure can be triggered by its signature and an instant name 
+* (encapsulated in a {@link ProcedureDescription} object). When the servers are shut down,
+* the stop methods on both classes are called to clean up the data associated with the procedure.
 */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

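Note: a minimal sketch of registering custom managers through the two configuration
parameters named above (the com.example classes are illustrative):

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.procedure.master.classes",
      "com.example.MyMasterProcedureManager");
  conf.set("hbase.procedure.regionserver.classes",
      "com.example.MyRegionServerProcedureManager");
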
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
index 8b41603..8545189 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
@@ -27,8 +27,8 @@ import org.apache.zookeeper.KeeperException;
 
 /**
  * Provides the globally barriered procedure framework and environment for
- * master oriented operations. {@link HMaster} interacts with the loaded
- * procedure manager through this class.
+ * master oriented operations. {@link org.apache.hadoop.hbase.master.HMaster} 
+ * interacts with the loaded procedure manager through this class.
  */
 public class MasterProcedureManagerHost extends
     ProcedureManagerHost<MasterProcedureManager> {

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
index 7610ff6..0f4ea64 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
@@ -29,7 +29,8 @@ import org.apache.zookeeper.KeeperException;
 
 /**
  * Provides the globally barriered procedure framework and environment
- * for region server oriented operations.  {@link HRegionServer} interacts
+ * for region server oriented operations. 
+ * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} interacts
  * with the loaded procedure manager through this class.
  */
 public class RegionServerProcedureManagerHost extends

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index 67f7147..a2284dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -23,8 +23,10 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 /**
  * Wraps together the mutations which are applied as a batch to the region and their operation
  * status and WALEdits. 
- * @see RegionObserver#preBatchMutate(ObserverContext, MiniBatchOperationInProgress)
- * @see RegionObserver#postBatchMutate(ObserverContext, MiniBatchOperationInProgress)
+ * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate(
+ * ObserverContext, MiniBatchOperationInProgress)
+ * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
+ * ObserverContext, MiniBatchOperationInProgress)
  * @param <T> Pair<Mutation, Integer> pair of Mutations and associated rowlock ids.
  */
 @InterfaceAudience.Private
@@ -61,7 +63,8 @@ public class MiniBatchOperationInProgress<T> {
 
   /**
    * Sets the status code for the operation(Mutation) at the specified position.
-   * By setting this status, {@link RegionObserver} can make HRegion to skip Mutations.
+   * By setting this status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} 
+   * can make HRegion skip Mutations.
    * @param index
    * @param opStatus
    */

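Note: a minimal sketch of the skip mechanism described above, as a hook inside a
BaseRegionObserver subclass that marks every mutation in the batch as already handled
(assumes OperationStatus.SUCCESS is the published success constant):

  @Override
  public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    for (int i = 0; i < miniBatchOp.size(); i++) {
      // SUCCESS tells HRegion the mutation is handled, so it is skipped
      miniBatchOp.setOperationStatus(i, OperationStatus.SUCCESS);
    }
  }
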
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
index 20a2482..bb352b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
@@ -54,7 +54,8 @@ public interface RegionScanner extends InternalScanner {
   boolean reseek(byte[] row) throws IOException;
 
   /**
-   * @return The preferred max buffersize. See {@link Scan#setMaxResultSize(long)}
+   * @return The preferred max buffersize. See 
+   * {@link org.apache.hadoop.hbase.client.Scan#setMaxResultSize(long)}
    */
   long getMaxResultSize();
 

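Note: the client-side knob this getter mirrors; a one-line sketch (the 2 MB cap is
illustrative):

  Scan scan = new Scan();
  scan.setMaxResultSize(2 * 1024 * 1024);  // cap the result size buffered per request at ~2 MB
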
http://git-wip-us.apache.org/repos/asf/hbase/blob/56a03d73/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
index 4932f32..eeffa8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
@@ -45,9 +45,9 @@ import com.google.common.annotations.VisibleForTesting;
 
 /**
  * This worker is spawned in every regionserver, including the master. The worker waits for log
- * splitting tasks to be put up by the {@link SplitLogManager} running in the master and races with
- * other workers in other serves to acquire those tasks. The coordination is done via coordination
- * engine.
+ * splitting tasks to be put up by the {@link org.apache.hadoop.hbase.master.SplitLogManager} 
+ * running in the master and races with other workers on other servers to acquire those tasks.
+ * The coordination is done via coordination engine.
  * <p>
  * If a worker has successfully moved the task from state UNASSIGNED to OWNED then it owns the task.
  * It keeps heart beating the manager by periodically moving the task from UNASSIGNED to OWNED
@@ -186,7 +186,8 @@ public class SplitLogWorker implements Runnable {
    * acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
    * guarantee that two workers will not be executing the same task, it
    * is better to have workers prepare the task and then have the
-   * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher
+   * {@link org.apache.hadoop.hbase.master.SplitLogManager} commit the work in 
+   * SplitLogManager.TaskFinisher
    */
   public interface TaskExecutor {
     enum Status {