Posted to commits@hbase.apache.org by nd...@apache.org on 2017/12/01 04:41:50 UTC

[1/6] hbase git commit: bump version to 1.1.13

Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 926021447 -> c64bf8a9f


bump version to 1.1.13


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6ff3743
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6ff3743
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6ff3743

Branch: refs/heads/branch-1.1
Commit: b6ff374346da142bdde46dd89498e47ba28dd465
Parents: 9260214
Author: Nick Dimiduk <nd...@apache.org>
Authored: Thu Nov 30 19:34:00 2017 -0800
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Thu Nov 30 19:34:00 2017 -0800

----------------------------------------------------------------------
 hbase-annotations/pom.xml                          | 2 +-
 hbase-assembly/pom.xml                             | 2 +-
 hbase-checkstyle/pom.xml                           | 4 ++--
 hbase-client/pom.xml                               | 2 +-
 hbase-common/pom.xml                               | 2 +-
 hbase-examples/pom.xml                             | 2 +-
 hbase-hadoop-compat/pom.xml                        | 2 +-
 hbase-hadoop2-compat/pom.xml                       | 2 +-
 hbase-it/pom.xml                                   | 2 +-
 hbase-prefix-tree/pom.xml                          | 2 +-
 hbase-procedure/pom.xml                            | 2 +-
 hbase-protocol/pom.xml                             | 2 +-
 hbase-resource-bundle/pom.xml                      | 2 +-
 hbase-rest/pom.xml                                 | 2 +-
 hbase-server/pom.xml                               | 2 +-
 hbase-shaded/hbase-shaded-check-invariants/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml           | 2 +-
 hbase-shaded/hbase-shaded-server/pom.xml           | 2 +-
 hbase-shaded/pom.xml                               | 2 +-
 hbase-shell/pom.xml                                | 2 +-
 hbase-testing-util/pom.xml                         | 2 +-
 hbase-thrift/pom.xml                               | 2 +-
 pom.xml                                            | 2 +-
 23 files changed, 24 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
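The per-file changes below are the same one-line parent-version edit in every module. For context, a bump like this can also be applied in a single step with the Versions Maven Plugin that the release documentation later in this thread points to; a minimal sketch, with an illustrative plugin version:

[source,bourne]
----
# Sketch only: set the release version in every pom of the multi-module build,
# then confirm no -SNAPSHOT references remain. Plugin version is illustrative.
$ mvn clean org.codehaus.mojo:versions-maven-plugin:2.5:set -DnewVersion=1.1.13
$ grep -r --include=pom.xml -- -SNAPSHOT . || echo "no SNAPSHOT versions left"
----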


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-annotations/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 0aa992a..75c9edb 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 024fcaf..09e1c1b 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-assembly</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-checkstyle/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index 87b2308..db01952 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -22,14 +22,14 @@
 <modelVersion>4.0.0</modelVersion>
 <groupId>org.apache.hbase</groupId>
 <artifactId>hbase-checkstyle</artifactId>
-<version>1.1.13-SNAPSHOT</version>
+<version>1.1.13</version>
 <name>Apache HBase - Checkstyle</name>
 <description>Module to hold Checkstyle properties for HBase.</description>
 
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-client/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 06606cf..47a2e5e 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-common/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 7b7d211..786f50b 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index af814d8..fecbd68 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-examples</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-hadoop-compat/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
index ae199f3..3cbfdfa 100644
--- a/hbase-hadoop-compat/pom.xml
+++ b/hbase-hadoop-compat/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hbase</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.1.13-SNAPSHOT</version>
+        <version>1.1.13</version>
         <relativePath>..</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-hadoop2-compat/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 577cc5f..3e5e4f2 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -21,7 +21,7 @@ limitations under the License.
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-it/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 2a9eb62..ceb2abe 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-prefix-tree/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml
index 87de9f0..395694d 100644
--- a/hbase-prefix-tree/pom.xml
+++ b/hbase-prefix-tree/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-procedure/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml
index 938f822..628f263 100644
--- a/hbase-procedure/pom.xml
+++ b/hbase-procedure/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-protocol/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 8a5fe6a..8944fe6 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hbase</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.1.13-SNAPSHOT</version>
+        <version>1.1.13</version>
         <relativePath>..</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-resource-bundle/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml
index 1cd12b1..648b798 100644
--- a/hbase-resource-bundle/pom.xml
+++ b/hbase-resource-bundle/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-rest/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index cb39eb3..9cb9386 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-rest</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 78c4f49..770dce7 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-server</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-shaded/hbase-shaded-check-invariants/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-shaded/hbase-shaded-check-invariants/pom.xml b/hbase-shaded/hbase-shaded-check-invariants/pom.xml
index 6a8a5b5..72e313b 100644
--- a/hbase-shaded/hbase-shaded-check-invariants/pom.xml
+++ b/hbase-shaded/hbase-shaded-check-invariants/pom.xml
@@ -16,7 +16,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>../..</relativePath>
   </parent>
   <artifactId>hbase-shaded-check-invariants</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-shaded/hbase-shaded-client/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-shaded/hbase-shaded-client/pom.xml b/hbase-shaded/hbase-shaded-client/pom.xml
index a2a0a56..4b62412 100644
--- a/hbase-shaded/hbase-shaded-client/pom.xml
+++ b/hbase-shaded/hbase-shaded-client/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>hbase-shaded</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.1.13-SNAPSHOT</version>
+        <version>1.1.13</version>
         <relativePath>..</relativePath>
     </parent>
     <artifactId>hbase-shaded-client</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-shaded/hbase-shaded-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-shaded/hbase-shaded-server/pom.xml b/hbase-shaded/hbase-shaded-server/pom.xml
index 93393a5..ac4d5ea 100644
--- a/hbase-shaded/hbase-shaded-server/pom.xml
+++ b/hbase-shaded/hbase-shaded-server/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>hbase-shaded</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.1.13-SNAPSHOT</version>
+        <version>1.1.13</version>
         <relativePath>..</relativePath>
     </parent>
     <artifactId>hbase-shaded-server</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-shaded/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml
index cc4c187..fc52aa0 100644
--- a/hbase-shaded/pom.xml
+++ b/hbase-shaded/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hbase</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.1.13-SNAPSHOT</version>
+        <version>1.1.13</version>
         <relativePath>..</relativePath>
     </parent>
     <artifactId>hbase-shaded</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-shell/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index 5dc8838..7f08323 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-shell</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-testing-util/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index 2a79f07..9f604ad 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hbase</artifactId>
         <groupId>org.apache.hbase</groupId>
-        <version>1.1.13-SNAPSHOT</version>
+        <version>1.1.13</version>
         <relativePath>..</relativePath>
     </parent>
     <artifactId>hbase-testing-util</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-thrift/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index 2afe3c5..d36ea28 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-thrift</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ea6ae0a..24fa213 100644
--- a/pom.xml
+++ b/pom.xml
@@ -39,7 +39,7 @@
   <groupId>org.apache.hbase</groupId>
   <artifactId>hbase</artifactId>
   <packaging>pom</packaging>
-  <version>1.1.13-SNAPSHOT</version>
+  <version>1.1.13</version>
   <name>Apache HBase</name>
   <description>
     Apache HBase™ is the Hadoop database. Use it when you need


[3/6] hbase git commit: updating docs from master

Posted by nd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/developer.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
index 6a546fb..0ada9a6 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -46,7 +46,7 @@ As Apache HBase is an Apache Software Foundation project, see <<asf,asf>>
 === Mailing Lists
 
 Sign up for the dev-list and the user-list.
-See the link:http://hbase.apache.org/mail-lists.html[mailing lists] page.
+See the link:https://hbase.apache.org/mail-lists.html[mailing lists] page.
 Posing questions - and helping to answer other people's questions - is encouraged! There are varying levels of experience on both lists so patience and politeness are encouraged (and please stay on topic.)
 
 [[slack]]
@@ -64,7 +64,7 @@ FreeNode offers a web-based client, but most people prefer a native client, and
 
 === Jira
 
-Check for existing issues in link:https://issues.apache.org/jira/browse/HBASE[Jira].
+Check for existing issues in link:https://issues.apache.org/jira/projects/HBASE/issues[Jira].
 If it's either a new feature request, enhancement, or a bug, file a ticket.
 
 We track multiple types of work in JIRA:
@@ -173,8 +173,8 @@ GIT is our repository of record for all but the Apache HBase website.
 We used to be on SVN.
 We migrated.
 See link:https://issues.apache.org/jira/browse/INFRA-7768[Migrate Apache HBase SVN Repos to Git].
-See link:http://hbase.apache.org/source-repository.html[Source Code
-                Management] page for contributor and committer links or search for HBase on the link:http://git.apache.org/[Apache Git] page.
+See link:https://hbase.apache.org/source-repository.html[Source Code
+                Management] page for contributor and committer links or search for HBase on the link:https://git.apache.org/[Apache Git] page.
 
 == IDEs
 
@@ -479,8 +479,7 @@ mvn -DskipTests package assembly:single deploy
 
 If you see `Unable to find resource 'VM_global_library.vm'`, ignore it.
 It's not an error.
-It is link:http://jira.codehaus.org/browse/MSITE-286[officially
-                        ugly] though.
+It is link:https://issues.apache.org/jira/browse/MSITE-286[officially ugly] though.
 
 [[releasing]]
 == Releasing Apache HBase
@@ -540,35 +539,30 @@ For the build to sign them for you, you a properly configured _settings.xml_ in
 
 [[maven.release]]
 === Making a Release Candidate
-
-NOTE: These instructions are for building HBase 1.y.z
-
-.Point Releases
-If you are making a point release (for example to quickly address a critical incompatibility or security problem) off of a release branch instead of a development branch, the tagging instructions are slightly different.
-I'll prefix those special steps with _Point Release Only_.
+Only committers may make releases of hbase artifacts.
 
 .Before You Begin
-Before you make a release candidate, do a practice run by deploying a snapshot.
-Before you start, check to be sure recent builds have been passing for the branch from where you are going to take your release.
-You should also have tried recent branch tips out on a cluster under load, perhaps by running the `hbase-it` integration test suite for a few hours to 'burn in' the near-candidate bits.
-
-.Point Release Only
+Make sure your environment is properly set up. Maven and Git are the main tooling
+used in the steps below. You'll need a properly configured _settings.xml_ file in your
+local _~/.m2_ maven repository with logins for apache repos (See <<maven.settings.xml>>).
+You will also need to have a published signing key. Browse the Hadoop
+link:http://wiki.apache.org/hadoop/HowToRelease[How To Release] wiki page on
+how to release. It is a model for most of the instructions below. It often has more
+detail on particular steps, for example, on adding your code signing key to the
+project KEYS file up in Apache or on how to update JIRA in preparation for release.
+
+Before you make a release candidate, do a practice run by deploying a SNAPSHOT.
+Check to be sure recent builds have been passing for the branch from where you
+are going to take your release. You should also have tried recent branch tips
+out on a cluster under load, perhaps by running the `hbase-it` integration test
+suite for a few hours to 'burn in' the near-candidate bits.
+
+
+.Specifying the Heap Space for Maven
 [NOTE]
 ====
-At this point you should tag the previous release branch (ex: 0.96.1) with the new point release tag (e.g.
-0.96.1.1 tag). Any commits with changes for the point release should go against the new tag.
-====
-
-The Hadoop link:http://wiki.apache.org/hadoop/HowToRelease[How To
-                    Release] wiki page is used as a model for most of the instructions below.
-                    Although it now stale, it may have more detail on particular sections, so
-                    it is worth review especially if you get stuck.
-
-.Specifying the Heap Space for Maven on OSX
-[NOTE]
-====
-On OSX, you may run into OutOfMemoryErrors building, particularly building the site and
-documentation. Up the heap and permgen space for Maven by setting the `MAVEN_OPTS` variable.
+You may run into OutOfMemoryErrors building, particularly building the site and
+documentation. Up the heap for Maven by setting the `MAVEN_OPTS` variable.
 You can prefix the variable to the Maven command, as in the following example:
 
 ----
@@ -579,10 +573,19 @@ You could also set this in an environment variable or alias in your shell.
 ====
 
 
-NOTE: The script _dev-support/make_rc.sh_ automates many of these steps.
-It does not do the modification of the _CHANGES.txt_                    for the release, the close of the staging repository in Apache Maven (human intervention is needed here), the checking of the produced artifacts to ensure they are 'good' -- e.g.
-extracting the produced tarballs, verifying that they look right, then starting HBase and checking that everything is running correctly, then the signing and pushing of the tarballs to link:http://people.apache.org[people.apache.org].
-The script handles everything else, and comes in handy.
+[NOTE]
+====
+The script _dev-support/make_rc.sh_ automates many of the below steps.
+It will checkout a tag, clean the checkout, build src and bin tarballs,
+and deploy the built jars to repository.apache.org.
+It does NOT do the modification of the _CHANGES.txt_ for the release,
+the checking of the produced artifacts to ensure they are 'good' --
+e.g. extracting the produced tarballs, verifying that they
+look right, then starting HBase and checking that everything is running
+correctly -- or the signing and pushing of the tarballs to
+link:https://people.apache.org[people.apache.org].
+Take a look. Modify/improve as you see fit.
+====
 
 .Procedure: Release Procedure
 . Update the _CHANGES.txt_ file and the POM files.
@@ -593,63 +596,123 @@ Adjust the version in all the POM files appropriately.
 If you are making a release candidate, you must remove the `-SNAPSHOT` label from all versions
 in all pom.xml files.
If you are running this recipe to publish a snapshot, you must keep the `-SNAPSHOT` suffix on the hbase version.
-The link:http://mojo.codehaus.org/versions-maven-plugin/[Versions
-                            Maven Plugin] can be of use here.
+The link:http://www.mojohaus.org/versions-maven-plugin/[Versions Maven Plugin] can be of use here.
 To set a version in all the many poms of the hbase multi-module project, use a command like the following:
 +
 [source,bourne]
 ----
-
-$ mvn clean org.codehaus.mojo:versions-maven-plugin:1.3.1:set -DnewVersion=0.96.0
+$ mvn clean org.codehaus.mojo:versions-maven-plugin:2.5:set -DnewVersion=2.1.0-SNAPSHOT
 ----
 +
-Make sure all versions in poms are changed! Checkin the _CHANGES.txt_ and any version changes.
+Make sure all versions in poms are changed! Check in the _CHANGES.txt_ and any maven version changes.
 
 . Update the documentation.
 +
 Update the documentation under _src/main/asciidoc_.
-This usually involves copying the latest from master and making version-particular
+This usually involves copying the latest from master branch and making version-particular
 adjustments to suit this release candidate version.
 
-. Build the source tarball.
+. Clean the checkout dir
 +
-Now, build the source tarball.
-This tarball is Hadoop-version-independent.
-It is just the pure source code and documentation without a particular hadoop taint, etc.
-Add the `-Prelease` profile when building.
-It checks files for licenses and will fail the build if unlicensed files are present.
+[source,bourne]
+----
+
+$ mvn clean
+$ git clean -f -x -d
+----
+
+
+. Run Apache-Rat
+Check that licenses are good.
 +
 [source,bourne]
 ----
 
-$ mvn clean install -DskipTests assembly:single -Dassembly.file=hbase-assembly/src/main/assembly/src.xml -Prelease
+$ mvn apache-rat
 ----
 +
-Extract the tarball and make sure it looks good.
-A good test for the src tarball being 'complete' is to see if you can build new tarballs from this source bundle.
-If the source tarball is good, save it off to a _version directory_, a directory somewhere where you are collecting all of the tarballs you will publish as part of the release candidate.
-For example if you were building an hbase-0.96.0 release candidate, you might call the directory _hbase-0.96.0RC0_.
-Later you will publish this directory as our release candidate.
+If the above fails, check the rat log.
 
-. Build the binary tarball.
 +
-Next, build the binary tarball.
-Add the `-Prelease`                        profile when building.
-It checks files for licenses and will fail the build if unlicensed files are present.
-Do it in two steps.
+[source,bourne]
+----
+$ grep 'Rat check' patchprocess/mvn_apache_rat.log
+----
 +
-* First install into the local repository
+
+. Create a release tag.
+Presuming you have run basic tests, the rat check passes, and all is
+looking good, now is the time to tag the release candidate (you can
+always remove the tag if you need to redo). To tag, do
+what follows, substituting in the version appropriate to your build.
+All tags should be signed tags; i.e. pass the _-s_ option (See
+link:https://git-scm.com/book/id/v2/Git-Tools-Signing-Your-Work[Signing Your Work]
+for how to set up your git environment for signing).
+
 +
 [source,bourne]
 ----
 
-$ mvn clean install -DskipTests -Prelease
+$ git tag -s 2.0.0-alpha4-RC0 -m "Tagging the 2.0.0-alpha4 first Release Candidate (Candidates start at zero)"
+----
+
+Or, if you are making a release, tags should have a _rel/_ prefix to ensure
+they are preserved in the Apache repo as in:
+
+[source,bourne]
+----
+$ git tag -s rel/2.0.0-alpha4 -m "Tagging the 2.0.0-alpha4 Release"
 ----
 
-* Next, generate documentation and assemble the tarball.
+Push the (specific) tag (only) so others have access.
++
+[source,bourne]
+----
+
+$ git push origin 2.0.0-alpha4-RC0
+----
++
+For how to delete tags, see
+link:http://www.manikrathee.com/how-to-delete-a-tag-in-git.html[How to Delete a Tag]. It covers
+deleting tags that have not yet been pushed to the remote Apache
+repo as well as deleting tags already pushed to Apache.
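A minimal sketch of the delete-and-redo case just described, reusing the illustrative RC tag name from above:

[source,bourne]
----
# Sketch only: drop a bad RC tag locally and, if it was already pushed, on the remote.
$ git tag -d 2.0.0-alpha4-RC0
$ git push origin :refs/tags/2.0.0-alpha4-RC0
----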
+
+
+. Build the source tarball.
++
+Now, build the source tarball. Let's presume we are building the source
+tarball for the tag _2.0.0-alpha4-RC0_ into _/tmp/hbase-2.0.0-alpha4-RC0/_
+(This step requires that the mvn and git clean steps described above have just been done).
 +
 [source,bourne]
 ----
+$ git archive --format=tar.gz --output="/tmp/hbase-2.0.0-alpha4-RC0/hbase-2.0.0-alpha4-src.tar.gz" --prefix="hbase-2.0.0-alpha4/" $git_tag
+----
+
+Above we generate the hbase-2.0.0-alpha4-src.tar.gz tarball into the
+_/tmp/hbase-2.0.0-alpha4-RC0_ build output directory (We don't want the _RC0_ in the name or prefix.
+These bits are currently a release candidate but if the VOTE passes, they will become the release so we do not taint
+the artifact names with _RCX_).
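An older revision of this chapter suggested checking that the source bundle is 'complete' by building from it; a minimal sketch of that check, with illustrative paths and version:

[source,bourne]
----
# Sketch only: a good test of the src tarball is whether you can build from it.
# Paths and version are illustrative.
$ cd /tmp/hbase-2.0.0-alpha4-RC0
$ tar xzf hbase-2.0.0-alpha4-src.tar.gz
$ cd hbase-2.0.0-alpha4
$ mvn clean install -DskipTests
----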
+
+. Build the binary tarball.
+Next, build the binary tarball. Add the `-Prelease` profile when building.
+It runs the license apache-rat check among other rules that help ensure
+all is wholesome. Do it in two steps.
+
+First install into the local repository
+
+[source,bourne]
+----
+
+$ mvn clean install -DskipTests -Prelease
+----
+
+Next, generate documentation and assemble the tarball. Be warned,
+this next step can take a good while, a couple of hours generating site
+documentation.
+
+[source,bourne]
+----
 
 $ mvn install -DskipTests site assembly:single -Prelease
 ----
@@ -659,26 +722,23 @@ Otherwise, the build complains that hbase modules are not in the maven repositor
 when you try to do it all in one step, especially on a fresh repository.
 It seems that you need the install goal in both steps.
 +
-Extract the generated tarball and check it out.
+Extract the generated tarball -- you'll find it under
+_hbase-assembly/target_ and check it out.
 Look at the documentation, see if it runs, etc.
-If good, copy the tarball to the above mentioned _version directory_.
+If good, copy the tarball beside the source tarball in the
+build output directory.
 
-. Create a new tag.
-+
-.Point Release Only
-[NOTE]
-====
-The following step that creates a new tag can be skipped since you've already created the point release tag
-====
-+
-Tag the release at this point since it looks good.
-If you find an issue later, you can delete the tag and start over.
-Release needs to be tagged for the next step.
 
 . Deploy to the Maven Repository.
 +
-Next, deploy HBase to the Apache Maven repository, using the `apache-release` profile instead of the `release` profile when running the `mvn deploy` command.
-This profile invokes the Apache pom referenced by our pom files, and also signs your artifacts published to Maven, as long as the _settings.xml_ is configured correctly, as described in <<maven.settings.xml>>.
+Next, deploy HBase to the Apache Maven repository. Add the
+`apache-release` profile when running the `mvn deploy` command.
+This profile comes from the Apache parent pom referenced by our pom files.
+It does signing of your artifacts published to Maven, as long as the
+_settings.xml_ is configured correctly, as described in <<maven.settings.xml>>.
+This step depends on the local repository having been populated
+by the just-previous bin tarball build.
+
 +
 [source,bourne]
 ----
@@ -692,16 +752,24 @@ More work needs to be done on these maven artifacts to make them generally avail
 We do not release HBase tarball to the Apache Maven repository. To avoid deploying the tarball, do not
 include the `assembly:single` goal in your `mvn deploy` command. Check the deployed artifacts as described in the next section.
 
+.make_rc.sh
+[NOTE]
+====
+If you run the _dev-support/make_rc.sh_ script, this is as far as it takes you.
+To finish the release, take over from the script from here on out.
+====
+
 . Make the Release Candidate available.
 +
 The artifacts are in the maven repository in the staging area in the 'open' state.
 While in this 'open' state you can check out what you've published to make sure all is good.
-To do this, log in to Apache's Nexus at link:http://repository.apache.org[repository.apache.org] using your Apache ID.
+To do this, log in to Apache's Nexus at link:https://repository.apache.org[repository.apache.org] using your Apache ID.
 Find your artifacts in the staging repository. Click on 'Staging Repositories' and look for a new one ending in "hbase" with a status of 'Open', select it.
 Use the tree view to expand the list of repository contents and inspect if the artifacts you expect are present. Check the POMs.
 As long as the staging repo is open you can re-upload if something is missing or built incorrectly.
 +
 If something is seriously wrong and you would like to back out the upload, you can use the 'Drop' button to drop and delete the staging repository.
+Sometimes the upload fails in the middle. This is another reason you might have to 'Drop' the upload from the staging repository.
 +
 If it checks out, close the repo using the 'Close' button. The repository must be closed before a public URL to it becomes available. It may take a few minutes for the repository to close. Once complete you'll see a public URL to the repository in the Nexus UI. You may also receive an email with the URL. Provide the URL to the temporary staging repository in the email that announces the release candidate.
 (Folks will need to add this repo URL to their local poms or to their local _settings.xml_ file to pull the published release candidate artifacts.)
@@ -716,39 +784,25 @@ Check it out and run its simple test to make sure maven artifacts are properly d
 Be sure to edit the pom to point to the proper staging repository.
 Make sure you are pulling from the repository when tests run and that you are not getting from your local repository, by either passing the `-U` flag or deleting your local repo content and check maven is pulling from remote out of the staging repository.
 ====
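A minimal sketch of the re-check described in the note above, assuming the staging repository has already been wired into the downstreamer project's pom or _settings.xml_:

[source,bourne]
----
# Sketch only: force re-resolution from the remote staging repo rather than the
# local cache, either by purging the cached hbase artifacts or by passing -U.
$ rm -rf ~/.m2/repository/org/apache/hbase
$ mvn -U clean test
----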
-+
-See link:http://www.apache.org/dev/publishing-maven-artifacts.html[Publishing Maven Artifacts] for some pointers on this maven staging process.
-+
-NOTE: We no longer publish using the maven release plugin.
-Instead we do +mvn deploy+.
-It seems to give us a backdoor to maven release publishing.
-If there is no _-SNAPSHOT_ on the version string, then we are 'deployed' to the apache maven repository staging directory from which we can publish URLs for candidates and later, if they pass, publish as release (if a _-SNAPSHOT_ on the version string, deploy will put the artifacts up into apache snapshot repos).
-+
+
+See link:https://www.apache.org/dev/publishing-maven-artifacts.html[Publishing Maven Artifacts] for some pointers on this maven staging process.
+
 If the HBase version ends in `-SNAPSHOT`, the artifacts go elsewhere.
 They are put into the Apache snapshots repository directly and are immediately available.
 Making a SNAPSHOT release, this is what you want to happen.
 
-. If you used the _make_rc.sh_ script instead of doing
-  the above manually, do your sanity checks now.
-+
-At this stage, you have two tarballs in your 'version directory' and a set of artifacts in a staging area of the maven repository, in the 'closed' state.
-These are publicly accessible in a temporary staging repository whose URL you should have gotten in an email.
-The above mentioned script, _make_rc.sh_ does all of the above for you minus the check of the artifacts built, the closing of the staging repository up in maven, and the tagging of the release.
-If you run the script, do your checks at this stage verifying the src and bin tarballs and checking what is up in staging using hbase-downstreamer project.
-Tag before you start the build.
-You can always delete it if the build goes haywire.
-
-. Sign, fingerprint and then 'stage' your release candiate version directory via svnpubsub by committing your directory to link:https://dist.apache.org/repos/dist/dev/hbase/[The 'dev' distribution directory] (See comments on link:https://issues.apache.org/jira/browse/HBASE-10554[HBASE-10554 Please delete old releases from mirroring system] but in essence it is an svn checkout of https://dist.apache.org/repos/dist/dev/hbase -- releases are at https://dist.apache.org/repos/dist/release/hbase). In the _version directory_ run the following commands:
-+
+At this stage, you have two tarballs in your 'build output directory' and a set of artifacts in a staging area of the maven repository, in the 'closed' state.
+Next, sign, fingerprint and then 'stage' your release candidate build output directory via svnpubsub by committing
+your directory to link:https://dist.apache.org/repos/dist/dev/hbase/[The 'dev' distribution directory] (See comments on link:https://issues.apache.org/jira/browse/HBASE-10554[HBASE-10554 Please delete old releases from mirroring system] but in essence it is an svn checkout of https://dist.apache.org/repos/dist/dev/hbase -- releases are at https://dist.apache.org/repos/dist/release/hbase). In the _version directory_ run the following commands:
+
 [source,bourne]
 ----
 
-$ for i in *.tar.gz; do echo $i; gpg --print-mds $i > $i.mds ; done
 $ for i in *.tar.gz; do echo $i; gpg --print-md MD5 $i > $i.md5 ; done
 $ for i in *.tar.gz; do echo $i; gpg --print-md SHA512 $i > $i.sha ; done
 $ for i in *.tar.gz; do echo $i; gpg --armor --output $i.asc --detach-sig $i  ; done
 $ cd ..
-# Presuming our 'version directory' is named 0.96.0RC0, copy it to the svn checkout of the dist dev dir
+# Presuming our 'build output directory' is named 0.96.0RC0, copy it to the svn checkout of the dist dev dir
 # in this case named hbase.dist.dev.svn
 $ cd /Users/stack/checkouts/hbase.dist.dev.svn
 $ svn info
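# A minimal additional check (sketch only): verify the detached signatures and
# SHA512 digests just produced, assuming your own public key is in the keyring.
$ for i in *.tar.gz; do gpg --verify $i.asc $i; done
$ for i in *.tar.gz; do gpg --print-md SHA512 $i | diff - $i.sha; done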
@@ -815,7 +869,7 @@ This plugin is run when you specify the +site+ goal as in when you run +mvn site
 See <<appendix_contributing_to_documentation,appendix contributing to documentation>> for more information on building the documentation.
 
 [[hbase.org]]
-== Updating link:http://hbase.apache.org[hbase.apache.org]
+== Updating link:https://hbase.apache.org[hbase.apache.org]
 
 [[hbase.org.site.contributing]]
 === Contributing to hbase.apache.org
@@ -823,7 +877,7 @@ See <<appendix_contributing_to_documentation,appendix contributing to documentat
 See <<appendix_contributing_to_documentation,appendix contributing to documentation>> for more information on contributing to the documentation or website.
 
 [[hbase.org.site.publishing]]
-=== Publishing link:http://hbase.apache.org[hbase.apache.org]
+=== Publishing link:https://hbase.apache.org[hbase.apache.org]
 
 See <<website_publish>> for instructions on publishing the website and documentation.
 
@@ -920,7 +974,7 @@ Also, keep in mind that if you are running tests in the `hbase-server` module yo
 === Unit Tests
 
 Apache HBase test cases are subdivided into four categories: small, medium, large, and
-integration with corresponding JUnit link:http://www.junit.org/node/581[categories]: `SmallTests`, `MediumTests`, `LargeTests`, `IntegrationTests`.
+integration with corresponding JUnit link:https://github.com/junit-team/junit4/wiki/Categories[categories]: `SmallTests`, `MediumTests`, `LargeTests`, `IntegrationTests`.
 JUnit categories are denoted using java annotations and look like this in your unit test code.
 
 [source,java]
@@ -1223,7 +1277,7 @@ $ mvn clean install test -Dtest=TestZooKeeper  -PskipIntegrationTests
 ==== Running integration tests against mini cluster
 
 HBase 0.92 added a `verify` maven target.
-Invoking it, for example by doing `mvn verify`, will run all the phases up to and including the verify phase via the maven link:http://maven.apache.org/plugins/maven-failsafe-plugin/[failsafe
+Invoking it, for example by doing `mvn verify`, will run all the phases up to and including the verify phase via the maven link:https://maven.apache.org/plugins/maven-failsafe-plugin/[failsafe
                         plugin], running all the above mentioned HBase unit tests as well as tests that are in the HBase integration test group.
 After you have completed +mvn install -DskipTests+ You can run just the integration tests by invoking:
 
@@ -1278,7 +1332,7 @@ Currently there is no support for running integration tests against a distribute
 The tests interact with the distributed cluster by using the methods in the `DistributedHBaseCluster` (implementing `HBaseCluster`) class, which in turn uses a pluggable `ClusterManager`.
 Concrete implementations provide actual functionality for carrying out deployment-specific and environment-dependent tasks (SSH, etc). The default `ClusterManager` is `HBaseClusterManager`, which uses SSH to remotely execute start/stop/kill/signal commands, and assumes some posix commands (ps, etc). Also assumes the user running the test has enough "power" to start/stop servers on the remote machines.
 By default, it picks up `HBASE_SSH_OPTS`, `HBASE_HOME`, `HBASE_CONF_DIR` from the env, and uses `bin/hbase-daemon.sh` to carry out the actions.
-Currently tarball deployments, deployments which uses _hbase-daemons.sh_, and link:http://incubator.apache.org/ambari/[Apache Ambari]                    deployments are supported.
+Currently tarball deployments, deployments which uses _hbase-daemons.sh_, and link:https://incubator.apache.org/ambari/[Apache Ambari]                    deployments are supported.
 _/etc/init.d/_ scripts are not supported for now, but it can be easily added.
 For other deployment options, a ClusterManager can be implemented and plugged in.
 
@@ -1286,7 +1340,7 @@ For other deployment options, a ClusterManager can be implemented and plugged in
 ==== Destructive integration / system tests (ChaosMonkey)
 
 HBase 0.96 introduced a tool named `ChaosMonkey`, modeled after
-link:http://techblog.netflix.com/2012/07/chaos-monkey-released-into-wild.html[same-named tool by Netflix's Chaos Monkey tool].
+link:https://netflix.github.io/chaosmonkey/[the same-named tool by Netflix].
 ChaosMonkey simulates real-world
 faults in a running cluster by killing or disconnecting random servers, or injecting
 other failures into the environment. You can use ChaosMonkey as a stand-alone tool
@@ -1790,10 +1844,10 @@ The script checks the directory for sub-directory called _.git/_, before proceed
 === Submitting Patches
 
 If you are new to submitting patches to open source or new to submitting patches to Apache, start by
- reading the link:http://commons.apache.org/patches.html[On Contributing Patches] page from
- link:http://commons.apache.org/[Apache Commons Project].
+ reading the link:https://commons.apache.org/patches.html[On Contributing Patches] page from
+ link:https://commons.apache.org/[Apache Commons Project].
 It provides a nice overview that applies equally to the Apache HBase Project.
-link:http://accumulo.apache.org/git.html[Accumulo doc on how to contribute and develop] is also
+link:https://accumulo.apache.org/git.html[Accumulo doc on how to contribute and develop] is also
 good read to understand development workflow.
 
 [[submitting.patches.create]]
@@ -1887,11 +1941,11 @@ Significant new features should provide an integration test in addition to unit
 [[reviewboard]]
 ==== ReviewBoard
 
-Patches larger than one screen, or patches that will be tricky to review, should go through link:http://reviews.apache.org[ReviewBoard].
+Patches larger than one screen, or patches that will be tricky to review, should go through link:https://reviews.apache.org[ReviewBoard].
 
 .Procedure: Use ReviewBoard
 . Register for an account if you don't already have one.
-  It does not use the credentials from link:http://issues.apache.org[issues.apache.org].
+  It does not use the credentials from link:https://issues.apache.org[issues.apache.org].
   Log in.
 . Click [label]#New Review Request#.
 . Choose the `hbase-git` repository.
@@ -1917,8 +1971,8 @@ For more information on how to use ReviewBoard, see link:http://www.reviewboard.
 
 New committers are encouraged to first read Apache's generic committer documentation:
 
-* link:http://www.apache.org/dev/new-committers-guide.html[Apache New Committer Guide]
-* link:http://www.apache.org/dev/committers.html[Apache Committer FAQ]
+* link:https://www.apache.org/dev/new-committers-guide.html[Apache New Committer Guide]
+* link:https://www.apache.org/dev/committers.html[Apache Committer FAQ]
 
 ===== Review
 
@@ -1934,7 +1988,7 @@ Use the btn:[Submit Patch]                        button in JIRA, just like othe
 
 ===== Reject
 
-Patches which do not adhere to the guidelines in link:https://wiki.apache.org/hadoop/Hbase/HowToCommit/hadoop/Hbase/HowToContribute#[HowToContribute] and to the link:https://wiki.apache.org/hadoop/Hbase/HowToCommit/hadoop/CodeReviewChecklist#[code review checklist] should be rejected.
+Patches which do not adhere to the guidelines in link:https://hbase.apache.org/book.html#developer[HowToContribute] and to the link:https://wiki.apache.org/hadoop/CodeReviewChecklist[code review checklist] should be rejected.
 Committers should always be polite to contributors and try to instruct and encourage them to contribute better patches.
 If a committer wishes to improve an unacceptable patch, then it should first be rejected, and a new patch should be attached by the committer for review.
 
@@ -2116,6 +2170,77 @@ However any substantive discussion (as with any off-list project-related discuss
 
 Misspellings and/or bad grammar is preferable to the disruption a JIRA comment edit causes: See the discussion at link:http://search-hadoop.com/?q=%5BReopened%5D+%28HBASE-451%29+Remove+HTableDescriptor+from+HRegionInfo&fc_project=HBase[Re:(HBASE-451) Remove HTableDescriptor from HRegionInfo]
 
+[[thirdparty]]
+=== The hbase-thirdparty dependency and shading/relocation
+
+A new project was created for the release of hbase-2.0.0. It was called
+`hbase-thirdparty`. This project exists only to provide the main hbase
+project with relocated -- or shaded -- versions of popular thirdparty
+libraries such as guava, netty, and protobuf. The mainline HBase project
+relies on the relocated versions of these libraries gotten from hbase-thirdparty
+rather than on finding these classes in their usual locations. We do this so
+we can specify whatever version we wish. If we don't relocate, we must
+harmonize our version to match that which hadoop and/or spark uses.
+
+For developers, this means you need to be careful referring to classes from
+netty, guava, protobuf, gson, etc. (see the hbase-thirdparty pom.xml for what
+it provides). Devs must refer to the hbase-thirdparty provided classes. In
+practice, this is usually not an issue (though it can be a bit of a pain). You
+will have to hunt for the relocated version of your particular class. You'll
+find it by prepending the general relocation prefix of `org.apache.hadoop.hbase.shaded.`.
+For example if you are looking for `com.google.protobuf.Message`, the relocated
+version used by HBase internals can be found at
+`org.apache.hadoop.hbase.shaded.com.google.protobuf.Message`.
+
+For a few thirdparty libs, like protobuf (see the protobuf chapter in this book
+for the why), your IDE may give you both options -- the `com.google.protobuf.*`
+and the `org.apache.hadoop.hbase.shaded.com.google.protobuf.*` -- because both
+classes are on your CLASSPATH. Unless you are doing the particular juggling
+required in Coprocessor Endpoint development (again see above cited protobuf
+chapter), you'll want to use the shaded version, always.
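One way to check which variant you are depending on is to look inside the thirdparty jar itself; a minimal sketch, where the jar name is a placeholder for whatever your build actually pulls in:

[source,bourne]
----
# Sketch only: confirm the relocated protobuf Message class is present.
# The jar name is a placeholder.
$ unzip -l hbase-thirdparty-protobuf-*.jar |
    grep org/apache/hadoop/hbase/shaded/com/google/protobuf/Message.class
----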
+
+Of note, the relocation of netty is particular. The netty folks have put in
+place a facility to aid relocation; it seems like shading netty is a popular project.
+One case of this requires the setting of a peculiar system property on the JVM
+so that classes out in the bundled shared library (.so) can be found in their
+relocated location. Here is the property that needs to be set:
+
+`-Dorg.apache.hadoop.hbase.shaded.io.netty.packagePrefix=org.apache.hadoop.hbase.shaded.`
+
+(Note that the trailing '.' is required). Starting hbase normally or when running
+test suites, the setting of this property is done for you. If you are doing something
+out of the ordinary, starting hbase from your own context, you'll need to provide
+this property on platforms that favor the bundled .so. See release notes on HBASE-18271
+for more. The complaint you see is something like the following:
+`Cause: java.lang.RuntimeException: Failed construction of Master: class org.apache.hadoop.hbase.master.HMasterorg.apache.hadoop.hbase.shaded.io.netty.channel.epoll.`
+
+If running unit tests and you run into the above message, add the system property
+to your surefire configuration as in the example below:
+
+[source,xml]
+----
+  <plugin>
+    <artifactId>maven-surefire-plugin</artifactId>
+    <configuration>
+      <systemPropertyVariables>
+        <org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>org.apache.hadoop.hbase.shaded.</org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>
+      </systemPropertyVariables>
+    </configuration>
+  </plugin>
+----
+
+Again the trailing period in the value above is intended.
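For completeness, a minimal sketch of supplying the property when launching a JVM yourself rather than through the hbase scripts; the classpath and main class below are placeholders:

[source,bourne]
----
# Sketch only: pass the relocation prefix (note the trailing period) to your own JVM.
# The jar and main class are placeholders.
$ java -Dorg.apache.hadoop.hbase.shaded.io.netty.packagePrefix=org.apache.hadoop.hbase.shaded. \
    -cp my-app.jar com.example.MyHBaseClient
----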
+
+The `hbase-thirdparty` project has groupid of `org.apache.hbase.thirdparty`.
+As of this writing, it provides three jars; one for netty with an artifactid of
+`hbase-thirdparty-netty`, one for protobuf at `hbase-thirdparty-protobuf` and then
+a jar for all else -- gson, guava -- at `hbase-thirdparty-miscellaneous`.
+
+The hbase-thirdparty artifacts are produced by the Apache HBase
+project under the aegis of the HBase Project Management Committee. Releases
+are done via the usual voting process on the hbase dev mailing list. If you find an issue
+in hbase-thirdparty, use the hbase JIRA and mailing lists to post notice.
+
 [[hbase.archetypes.development]]
 === Development of HBase-related Maven archetypes
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/external_apis.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/external_apis.adoc b/src/main/asciidoc/_chapters/external_apis.adoc
index 2f85461..ffb6ee6 100644
--- a/src/main/asciidoc/_chapters/external_apis.adoc
+++ b/src/main/asciidoc/_chapters/external_apis.adoc
@@ -29,7 +29,7 @@
 
 This chapter will cover access to Apache HBase either through non-Java languages and
 through custom protocols. For information on using the native HBase APIs, refer to
-link:http://hbase.apache.org/apidocs/index.html[User API Reference] and the
+link:https://hbase.apache.org/apidocs/index.html[User API Reference] and the
 <<hbase_apis,HBase APIs>> chapter.
 
 == REST
@@ -288,18 +288,17 @@ your filter to the file. For example, to return only rows for
 which keys start with <codeph>u123</codeph> and use a batch size
 of 100, the filter file would look like this:
 
-+++
-<pre>
-&lt;Scanner batch="100"&gt;
-  &lt;filter&gt;
+[source,xml]
+----
+<Scanner batch="100">
+  <filter>
     {
       "type": "PrefixFilter",
       "value": "u123"
     }
-  &lt;/filter&gt;
-&lt;/Scanner&gt;
-</pre>
-+++
+  </filter>
+</Scanner>
+----
 
 Pass the file to the `-d` argument of the `curl` request.
 |curl -vi -X PUT \
@@ -626,7 +625,9 @@ Documentation about Thrift has moved to <<thrift>>.
 == C/C++ Apache HBase Client
 
 FB's Chip Turner wrote a pure C/C++ client.
-link:https://github.com/facebook/native-cpp-hbase-client[Check it out].
+link:https://github.com/hinaria/native-cpp-hbase-client[Check it out].
+
+For a C++ client implementation, see link:https://issues.apache.org/jira/browse/HBASE-14850[HBASE-14850].
 
 [[jdo]]
 
@@ -640,8 +641,8 @@ represent persistent data.
 This code example has the following dependencies:
 
 . HBase 0.90.x or newer
-. commons-beanutils.jar (http://commons.apache.org/)
-. commons-pool-1.5.5.jar (http://commons.apache.org/)
+. commons-beanutils.jar (https://commons.apache.org/)
+. commons-pool-1.5.5.jar (https://commons.apache.org/)
 . transactional-tableindexed for HBase 0.90 (https://github.com/hbase-trx/hbase-transactional-tableindexed)
 
 .Download `hbase-jdo`
@@ -801,7 +802,7 @@ with HBase.
 ----
 resolvers += "Apache HBase" at "https://repository.apache.org/content/repositories/releases"
 
-resolvers += "Thrift" at "http://people.apache.org/~rawson/repo/"
+resolvers += "Thrift" at "https://people.apache.org/~rawson/repo/"
 
 libraryDependencies ++= Seq(
     "org.apache.hadoop" % "hadoop-core" % "0.20.2",

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/faq.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/faq.adoc b/src/main/asciidoc/_chapters/faq.adoc
index 9034d4b..0e498ac 100644
--- a/src/main/asciidoc/_chapters/faq.adoc
+++ b/src/main/asciidoc/_chapters/faq.adoc
@@ -33,10 +33,10 @@ When should I use HBase?::
   See <<arch.overview>> in the Architecture chapter.
 
 Are there other HBase FAQs?::
-  See the FAQ that is up on the wiki, link:http://wiki.apache.org/hadoop/Hbase/FAQ[HBase Wiki FAQ].
+  See the FAQ that is up on the wiki, link:https://wiki.apache.org/hadoop/Hbase/FAQ[HBase Wiki FAQ].
 
 Does HBase support SQL?::
-  Not really. SQL-ish support for HBase via link:http://hive.apache.org/[Hive] is in development, however Hive is based on MapReduce which is not generally suitable for low-latency requests. See the <<datamodel>> section for examples on the HBase client.
+  Not really. SQL-ish support for HBase via link:https://hive.apache.org/[Hive] is in development, however Hive is based on MapReduce which is not generally suitable for low-latency requests. See the <<datamodel>> section for examples on the HBase client.
 
 How can I find examples of NoSQL/HBase?::
   See the link to the BigTable paper in <<other.info>>, as well as the other papers.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/getting_started.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc
index 0e50273..2fdb949 100644
--- a/src/main/asciidoc/_chapters/getting_started.adoc
+++ b/src/main/asciidoc/_chapters/getting_started.adoc
@@ -51,7 +51,7 @@ Apart from downloading HBase, this procedure should take less than 10 minutes.
 
 Prior to HBase 0.94.x, HBase expected the loopback IP address to be 127.0.0.1.
 Ubuntu and some other distributions default to 127.0.1.1 and this will cause
-problems for you. See link:http://devving.com/?p=414[Why does HBase care about /etc/hosts?] for detail
+problems for you. See link:https://web-beta.archive.org/web/20140104070155/http://blog.devving.com/why-does-hbase-care-about-etchosts[Why does HBase care about /etc/hosts?] for detail
 
 The following _/etc/hosts_ file works correctly for HBase 0.94.x and earlier, on Ubuntu. Use this as a template if you run into trouble.
 [listing]
@@ -70,7 +70,7 @@ See <<java,Java>> for information about supported JDK versions.
 === Get Started with HBase
 
 .Procedure: Download, Configure, and Start HBase in Standalone Mode
-. Choose a download site from this list of link:http://www.apache.org/dyn/closer.cgi/hbase/[Apache Download Mirrors].
+. Choose a download site from this list of link:https://www.apache.org/dyn/closer.cgi/hbase/[Apache Download Mirrors].
   Click on the suggested top link.
   This will take you to a mirror of _HBase Releases_.
   Click on the folder named _stable_ and then download the binary file that ends in _.tar.gz_ to your local filesystem.
@@ -307,7 +307,7 @@ You can skip the HDFS configuration to continue storing your data in the local f
 This procedure assumes that you have configured Hadoop and HDFS on your local system and/or a remote
 system, and that they are running and available. It also assumes you are using Hadoop 2.
 The guide on
-link:http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SingleCluster.html[Setting up a Single Node Cluster]
+link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SingleCluster.html[Setting up a Single Node Cluster]
 in the Hadoop documentation is a good starting point.
 ====
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/hbase-default.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc
index 6b11945..9b3cfb7 100644
--- a/src/main/asciidoc/_chapters/hbase-default.adoc
+++ b/src/main/asciidoc/_chapters/hbase-default.adoc
@@ -376,23 +376,6 @@ The WAL file writer implementation.
 .Default
 `org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter`
 
-
-[[hbase.master.distributed.log.replay]]
-*`hbase.master.distributed.log.replay`*::
-+
-.Description
-Enable 'distributed log replay' as default engine splitting
-    WAL files on server crash.  This default is new in hbase 1.0.  To fall
-    back to the old mode 'distributed log splitter', set the value to
-    'false'.  'Disributed log replay' improves MTTR because it does not
-    write intermediate files.  'DLR' required that 'hfile.format.version'
-    be set to version 3 or higher.
-
-+
-.Default
-`true`
-
-
 [[hbase.regionserver.global.memstore.size]]
 *`hbase.regionserver.global.memstore.size`*::
 +
@@ -461,11 +444,12 @@ The host name or IP address of the name server (DNS)
 
       A split policy determines when a region should be split. The various other split policies that
       are available currently are ConstantSizeRegionSplitPolicy, DisabledRegionSplitPolicy,
-      DelimitedKeyPrefixRegionSplitPolicy, KeyPrefixRegionSplitPolicy etc.
+      DelimitedKeyPrefixRegionSplitPolicy, KeyPrefixRegionSplitPolicy,
+      BusyRegionSplitPolicy, SteppingSplitPolicy etc.
 
 +
 .Default
-`org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy`
+`org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy`
 
 
 [[zookeeper.session.timeout]]
@@ -475,7 +459,7 @@ The host name or IP address of the name server (DNS)
 ZooKeeper session timeout in milliseconds. It is used in two different ways.
       First, this value is used in the ZK client that HBase uses to connect to the ensemble.
       It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'. See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
+      https://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
       For example, if an HBase region server connects to a ZK ensemble that's also managed
       by HBase, then the
       session timeout will be the one specified by this configuration. But, a region server that connects
@@ -539,7 +523,7 @@ The host name or IP address of the name server (DNS)
 +
 .Description
 Port used by ZooKeeper peers to talk to each other.
-    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+    See https://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
     for more information.
 +
 .Default
@@ -551,7 +535,7 @@ Port used by ZooKeeper peers to talk to each other.
 +
 .Description
 Port used by ZooKeeper for leader election.
-    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+    See https://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
     for more information.
 +
 .Default
@@ -636,7 +620,7 @@ Property from ZooKeeper's config zoo.cfg.
 *`hbase.client.write.buffer`*::
 +
 .Description
-Default size of the HTable client write buffer in bytes.
+Default size of the BufferedMutator write buffer in bytes.
     A bigger buffer takes more memory -- on both the client and server
     side since server instantiates the passed write buffer to process
     it -- but a larger buffer size reduces the number of RPCs made.
@@ -694,7 +678,7 @@ The maximum number of concurrent tasks a single HTable instance will
     send to a single region server.
 +
 .Default
-`5`
+`2`
 
 
 [[hbase.client.max.perregion.tasks]]
@@ -1263,9 +1247,8 @@ A comma-separated list of sizes for buckets for the bucketcache
 +
 .Description
 The HFile format version to use for new files.
-      Version 3 adds support for tags in hfiles (See http://hbase.apache.org/book.html#hbase.tags).
-      Distributed Log Replay requires that tags are enabled. Also see the configuration
-      'hbase.replication.rpc.codec'.
+      Version 3 adds support for tags in hfiles (See https://hbase.apache.org/book.html#hbase.tags).
+      Also see the configuration 'hbase.replication.rpc.codec'.
 
 +
 .Default
@@ -1963,7 +1946,7 @@ If the DFSClient configuration
 
       Class used to execute the regions balancing when the period occurs.
       See the class comment for more on how it works
-      http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+      https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
       It replaces the DefaultLoadBalancer as the default (since renamed
       as the SimpleLoadBalancer).
 
@@ -2023,17 +2006,6 @@ A comma-separated list of
 .Default
 ``
 
-
-[[hbase.coordinated.state.manager.class]]
-*`hbase.coordinated.state.manager.class`*::
-+
-.Description
-Fully qualified name of class implementing coordinated state manager.
-+
-.Default
-`org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager`
-
-
 [[hbase.regionserver.storefile.refresh.period]]
 *`hbase.regionserver.storefile.refresh.period`*::
 +
@@ -2111,7 +2083,7 @@ Fully qualified name of class implementing coordinated state manager.
 
 +
 .Default
-`10`
+`16`
 
 
 [[hbase.replication.rpc.codec]]

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/hbase_apis.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/hbase_apis.adoc b/src/main/asciidoc/_chapters/hbase_apis.adoc
index f27c9dc..e466db9 100644
--- a/src/main/asciidoc/_chapters/hbase_apis.adoc
+++ b/src/main/asciidoc/_chapters/hbase_apis.adoc
@@ -28,7 +28,7 @@
 :experimental:
 
 This chapter provides information about performing operations using HBase native APIs.
-This information is not exhaustive, and provides a quick reference in addition to the link:http://hbase.apache.org/apidocs/index.html[User API Reference].
+This information is not exhaustive, and provides a quick reference in addition to the link:https://hbase.apache.org/apidocs/index.html[User API Reference].
 The examples here are not comprehensive or complete, and should be used for purposes of illustration only.
 
 Apache HBase also works with multiple external APIs.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/mapreduce.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/mapreduce.adoc b/src/main/asciidoc/_chapters/mapreduce.adoc
index dfa843a..cc9dce4 100644
--- a/src/main/asciidoc/_chapters/mapreduce.adoc
+++ b/src/main/asciidoc/_chapters/mapreduce.adoc
@@ -27,10 +27,10 @@
 :icons: font
 :experimental:
 
-Apache MapReduce is a software framework used to analyze large amounts of data, and is the framework used most often with link:http://hadoop.apache.org/[Apache Hadoop].
+Apache MapReduce is a software framework used to analyze large amounts of data. It is provided by link:https://hadoop.apache.org/[Apache Hadoop].
 MapReduce itself is out of the scope of this document.
-A good place to get started with MapReduce is http://hadoop.apache.org/docs/r2.6.0/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html.
-MapReduce version 2 (MR2)is now part of link:http://hadoop.apache.org/docs/r2.3.0/hadoop-yarn/hadoop-yarn-site/[YARN].
+A good place to get started with MapReduce is https://hadoop.apache.org/docs/r2.6.0/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html.
+MapReduce version 2 (MR2) is now part of link:https://hadoop.apache.org/docs/r2.3.0/hadoop-yarn/hadoop-yarn-site/[YARN].
 
 This chapter discusses specific configuration steps you need to take to use MapReduce on data within HBase.
 In addition, it discusses other interactions and issues between HBase and MapReduce
@@ -40,44 +40,88 @@ link:http://www.cascading.org/[alternative API] for MapReduce.
 .`mapred` and `mapreduce`
 [NOTE]
 ====
-There are two mapreduce packages in HBase as in MapReduce itself: _org.apache.hadoop.hbase.mapred_      and _org.apache.hadoop.hbase.mapreduce_.
-The former does old-style API and the latter the new style.
+There are two mapreduce packages in HBase as in MapReduce itself: _org.apache.hadoop.hbase.mapred_ and _org.apache.hadoop.hbase.mapreduce_.
+The former uses the old-style API and the latter the new-style API.
 The latter has more facility though you can usually find an equivalent in the older package.
 Pick the package that goes with your MapReduce deploy.
-When in doubt or starting over, pick the _org.apache.hadoop.hbase.mapreduce_.
-In the notes below, we refer to o.a.h.h.mapreduce but replace with the o.a.h.h.mapred if that is what you are using.
+When in doubt or starting over, pick _org.apache.hadoop.hbase.mapreduce_.
+In the notes below, we refer to _o.a.h.h.mapreduce_ but replace with
+_o.a.h.h.mapred_ if that is what you are using.
 ====
 
 [[hbase.mapreduce.classpath]]
 == HBase, MapReduce, and the CLASSPATH
 
-By default, MapReduce jobs deployed to a MapReduce cluster do not have access to either the HBase configuration under `$HBASE_CONF_DIR` or the HBase classes.
+By default, MapReduce jobs deployed to a MapReduce cluster do not have access to
+either the HBase configuration under `$HBASE_CONF_DIR` or the HBase classes.
 
-To give the MapReduce jobs the access they need, you could add _hbase-site.xml_ to _$HADOOP_HOME/conf_ and add HBase jars to the _$HADOOP_HOME/lib_ directory.
-You would then need to copy these changes across your cluster. Or you can edit _$HADOOP_HOME/conf/hadoop-env.sh_ and add them to the `HADOOP_CLASSPATH` variable.
-However, this approach is not recommended because it will pollute your Hadoop install with HBase references.
-It also requires you to restart the Hadoop cluster before Hadoop can use the HBase data.
+To give the MapReduce jobs the access they need, you could add _hbase-site.xml_ to _$HADOOP_HOME/conf_ and add HBase jars to the _$HADOOP_HOME/lib_ directory.
+You would then need to copy these changes across your cluster. Or you could edit _$HADOOP_HOME/conf/hadoop-env.sh_ and add the HBase dependencies to the `HADOOP_CLASSPATH` variable.
+Neither of these approaches is recommended because it pollutes your Hadoop install with HBase references.
+It also requires you to restart the Hadoop cluster before Hadoop can use the HBase data.
 
-The recommended approach is to let HBase add its dependency jars itself and use `HADOOP_CLASSPATH` or `-libjars`.
+The recommended approach is to let HBase add its dependency jars and use `HADOOP_CLASSPATH` or `-libjars`.
 
-Since HBase 0.90.x, HBase adds its dependency JARs to the job configuration itself.
-The dependencies only need to be available on the local `CLASSPATH`.
-The following example runs the bundled HBase link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] MapReduce job against a table named `usertable`.
-If you have not set the environment variables expected in the command (the parts prefixed by a `$` sign and surrounded by curly braces), you can use the actual system paths instead.
-Be sure to use the correct version of the HBase JAR for your system.
-The backticks (``` symbols) cause the shell to execute the sub-commands, setting the output of `hbase classpath` (the command to dump HBase CLASSPATH) to `HADOOP_CLASSPATH`.
+Since HBase `0.90.x`, HBase adds its dependency JARs to the job configuration itself.
+The dependencies only need to be available on the local `CLASSPATH`; from there they are picked
+up and bundled into the fat job jar deployed to the MapReduce cluster. A basic trick is to pass
+the full HBase classpath -- all HBase and dependent jars as well as configurations -- to the mapreduce
+job runner and let the HBase utility pick out of that full classpath what it needs, adding those entries to the
+MapReduce job configuration (see the source at `TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)` for how this is done).
+
+
+The following example runs the bundled HBase link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] MapReduce job against a table named `usertable`.
+It sets into `HADOOP_CLASSPATH` the jars HBase needs to run in a MapReduce context (including configuration files such as hbase-site.xml).
+Be sure to use the correct version of the HBase JAR for your system; replace the VERSION string in the below command line with the version of
+your local hbase install.  The backticks (``` symbols) cause the shell to execute the sub-commands, setting the output of `hbase classpath` into `HADOOP_CLASSPATH`.
 This example assumes you use a BASH-compatible shell.
 
 [source,bash]
 ----
-$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/lib/hbase-server-VERSION.jar rowcounter usertable
+$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` \
+  ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/lib/hbase-mapreduce-VERSION.jar \
+  org.apache.hadoop.hbase.mapreduce.RowCounter usertable
+----
+
+The above command will launch a row counting mapreduce job against the hbase cluster referenced by your local HBase configuration, running on whichever MapReduce cluster your hadoop configs point to.
+
+The main class of the `hbase-mapreduce.jar` is a Driver that lists a few basic mapreduce tasks that ship with hbase.
+For example, presuming your install is hbase `2.0.0-SNAPSHOT`:
+
+[source,bash]
+----
+$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` \
+  ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/lib/hbase-mapreduce-2.0.0-SNAPSHOT.jar
+An example program must be given as the first argument.
+Valid program names are:
+  CellCounter: Count cells in HBase table.
+  WALPlayer: Replay WAL files.
+  completebulkload: Complete a bulk data load.
+  copytable: Export a table from local cluster to peer cluster.
+  export: Write table data to HDFS.
+  exportsnapshot: Export the specific snapshot to a given FileSystem.
+  import: Import data written by Export.
+  importtsv: Import data in TSV format.
+  rowcounter: Count rows in HBase table.
+  verifyrep: Compare the data from tables in two different clusters. WARNING: It doesn't work for incrementColumnValues'd cells since the timestamp is changed after being appended to the log.
+
+----
+
+You can use the above listed shortnames for mapreduce jobs as in the below re-run of the row counter job (again, presuming your install is hbase `2.0.0-SNAPSHOT`):
+
+[source,bash]
+----
+$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` \
+  ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/lib/hbase-mapreduce-2.0.0-SNAPSHOT.jar \
+  rowcounter usertable
 ----
 
-When the command runs, internally, the HBase JAR finds the dependencies it needs and adds them to the MapReduce job configuration.
-See the source at `TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)` for how this is done.
+You might find the more selective `hbase mapredcp` tool output of interest; it lists the minimum set of jars needed
+to run a basic mapreduce job against an hbase install. It does not include configuration, so you'll probably need to add
+the HBase configuration directory if you want your MapReduce job to find the target cluster. You'll probably also have to add pointers to extra jars
+once you start to do anything of substance. Just specify the extras by passing the system property `-Dtmpjars` when
+you run `hbase mapredcp`.
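For illustration, a hedged sketch of combining the `hbase mapredcp` output with the HBase conf directory when launching your own job; `MyApp.jar` and `MyJobMainClass` are placeholders, not names from this document:

[source,bash]
----
# Dump the minimal set of jars needed to run a basic mapreduce job against this install
$ ${HBASE_HOME}/bin/hbase mapredcp

# Combine that output with the HBase conf directory so the job can find the target cluster
# (MyApp.jar and MyJobMainClass are placeholders for your own job jar and driver class)
$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase mapredcp`:${HBASE_CONF_DIR} \
  ${HADOOP_HOME}/bin/hadoop jar MyApp.jar MyJobMainClass
----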
 
-The command `hbase mapredcp` can also help you dump the CLASSPATH entries required by MapReduce, which are the same jars `TableMapReduceUtil#addDependencyJars` would add.
-You can add them together with HBase conf directory to `HADOOP_CLASSPATH`.
 For jobs that do not package their dependencies or call `TableMapReduceUtil#addDependencyJars`, the following command structure is necessary:
 
 [source,bash]
@@ -215,10 +259,10 @@ $ ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar rowcounte
 
 == HBase as a MapReduce Job Data Source and Data Sink
 
-HBase can be used as a data source, link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html[TableInputFormat], and data sink, link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat] or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.html[MultiTableOutputFormat], for MapReduce jobs.
-Writing MapReduce jobs that read or write HBase, it is advisable to subclass link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapper.html[TableMapper]        and/or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableReducer.html[TableReducer].
-See the do-nothing pass-through classes link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.html[IdentityTableMapper] and link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.html[IdentityTableReducer] for basic usage.
-For a more involved example, see link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] or review the `org.apache.hadoop.hbase.mapreduce.TestTableMapReduce` unit test.
+HBase can be used as a data source, link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html[TableInputFormat], and data sink, link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat] or link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.html[MultiTableOutputFormat], for MapReduce jobs.
+When writing MapReduce jobs that read or write HBase, it is advisable to subclass link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapper.html[TableMapper] and/or link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableReducer.html[TableReducer].
+See the do-nothing pass-through classes link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.html[IdentityTableMapper] and link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.html[IdentityTableReducer] for basic usage.
+For a more involved example, see link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] or review the `org.apache.hadoop.hbase.mapreduce.TestTableMapReduce` unit test.
 
 If you run MapReduce jobs that use HBase as source or sink, you need to specify the source and sink table and column names in your configuration.
 
@@ -231,7 +275,7 @@ On insert, HBase 'sorts' so there is no point double-sorting (and shuffling data
 If you do not need the Reduce, your map might emit counts of records processed for reporting at the end of the job, or set the number of Reduces to zero and use TableOutputFormat.
 If running the Reduce step makes sense in your case, you should typically use multiple reducers so that load is spread across the HBase cluster.
 
-A new HBase partitioner, the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.html[HRegionPartitioner], can run as many reducers the number of existing regions.
+A new HBase partitioner, the link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.html[HRegionPartitioner], can run as many reducers as there are existing regions.
 The HRegionPartitioner is suitable when your table is large and your upload will not greatly alter the number of existing regions upon completion.
 Otherwise use the default partitioner.
 
@@ -242,7 +286,7 @@ For more on how this mechanism works, see <<arch.bulk.load>>.
 
 == RowCounter Example
 
-The included link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] MapReduce job uses `TableInputFormat` and does a count of all rows in the specified table.
+The included link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] MapReduce job uses `TableInputFormat` and does a count of all rows in the specified table.
 To run it, use the following command:
 
 [source,bash]
@@ -262,13 +306,13 @@ If you have classpath errors, see <<hbase.mapreduce.classpath>>.
 [[splitter.default]]
 === The Default HBase MapReduce Splitter
 
-When link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html[TableInputFormat] is used to source an HBase table in a MapReduce job, its splitter will make a map task for each region of the table.
+When link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html[TableInputFormat] is used to source an HBase table in a MapReduce job, its splitter will make a map task for each region of the table.
 Thus, if there are 100 regions in the table, there will be 100 map-tasks for the job - regardless of how many column families are selected in the Scan.
 
 [[splitter.custom]]
 === Custom Splitters
 
-For those interested in implementing custom splitters, see the method `getSplits` in link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html[TableInputFormatBase].
+For those interested in implementing custom splitters, see the method `getSplits` in link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html[TableInputFormatBase].
 That is where the logic for map-task assignment resides.
 
 [[mapreduce.example]]
@@ -308,7 +352,7 @@ if (!b) {
 }
 ----
 
-...and the mapper instance would extend link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapper.html[TableMapper]...
+...and the mapper instance would extend link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapper.html[TableMapper]...
 
 [source,java]
 ----
@@ -356,7 +400,7 @@ if (!b) {
 }
 ----
 
-An explanation is required of what `TableMapReduceUtil` is doing, especially with the reducer. link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat] is being used as the outputFormat class, and several parameters are being set on the config (e.g., `TableOutputFormat.OUTPUT_TABLE`), as well as setting the reducer output key to `ImmutableBytesWritable` and reducer value to `Writable`.
+An explanation is required of what `TableMapReduceUtil` is doing, especially with the reducer. link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat] is being used as the outputFormat class, and several parameters are being set on the config (e.g., `TableOutputFormat.OUTPUT_TABLE`), as well as setting the reducer output key to `ImmutableBytesWritable` and reducer value to `Writable`.
 These could be set by the programmer on the job and conf, but `TableMapReduceUtil` tries to make things easier.
 
 The following is the example mapper, which will create a `Put` and matching the input `Result` and emit it.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/ops_mgt.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index 6181b13..d4478fa 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -332,7 +332,7 @@ See <<hfile_tool>>.
 === WAL Tools
 
 [[hlog_tool]]
-==== `FSHLog` tool
+==== FSHLog tool
 
 The main method on `FSHLog` offers manual split and dump facilities.
 Pass it WALs or the product of a split, the content of the _recovered.edits_.
@@ -353,9 +353,9 @@ Similarly you can force a split of a log file directory by doing:
 ----
 
 [[hlog_tool.prettyprint]]
-===== WAL Pretty Printer
+===== WALPrettyPrinter
 
-The WAL Pretty Printer is a tool with configurable options to print the contents of a WAL.
+The `WALPrettyPrinter` is a tool with configurable options to print the contents of a WAL.
 You can invoke it via the HBase cli with the 'wal' command.
 
 ----
@@ -365,7 +365,7 @@ You can invoke it via the HBase cli with the 'wal' command.
 .WAL Printing in older versions of HBase
 [NOTE]
 ====
-Prior to version 2.0, the WAL Pretty Printer was called the `HLogPrettyPrinter`, after an internal name for HBase's write ahead log.
+Prior to version 2.0, the `WALPrettyPrinter` was called the `HLogPrettyPrinter`, after an internal name for HBase's write ahead log.
 In those versions, you can print the contents of a WAL using the same configuration as above, but with the 'hlog' command.
 
 ----
@@ -444,12 +444,56 @@ See Jonathan Hsieh's link:https://blog.cloudera.com/blog/2012/06/online-hbase-ba
 === Export
 
 Export is a utility that will dump the contents of table to HDFS in a sequence file.
-Invoke via:
+The Export can be run via a Coprocessor Endpoint or MapReduce. Invoke via:
 
+*mapreduce-based Export*
 ----
 $ bin/hbase org.apache.hadoop.hbase.mapreduce.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]
 ----
 
+*endpoint-based Export*
+----
+$ bin/hbase org.apache.hadoop.hbase.coprocessor.Export <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]
+----
+
+*The Comparison of Endpoint-based Export And Mapreduce-based Export*
+|===
+||Endpoint-based Export|Mapreduce-based Export
+
+|HBase version requirement
+|2.0+
+|0.2.1+
+
+|Maven dependency
+|hbase-endpoint
+|hbase-mapreduce (2.0+), hbase-server (prior to 2.0)
+
+|Requirement before dump
+|mount the endpoint.Export on the target table
+|deploy the MapReduce framework
+
+|Read latency
+|low, directly read the data from region
+|normal, traditional RPC scan
+
+|Read Scalability
+|depend on number of regions
+|depend on number of mappers (see TableInputFormatBase#getSplits)
+
+|Timeout
+|operation timeout. configured by hbase.client.operation.timeout
+|scan timeout. configured by hbase.client.scanner.timeout.period
+
+|Permission requirement
+|READ, EXECUTE
+|READ
+
+|Fault tolerance
+|no
+|depend on MapReduce
+|===
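As a hedged illustration of the "mount the endpoint.Export on the target table" prerequisite in the comparison above, one possible way to attach the endpoint from the shell is via the coprocessor table attribute; `my_table` is a placeholder and the exact attribute syntax should be checked against your HBase version:

[source,bash]
----
# Attach the Export endpoint coprocessor to the target table from the HBase shell.
# The empty first field means the class is loaded from the server classpath;
# 'my_table' is a placeholder table name.
hbase> alter 'my_table', METHOD => 'table_att', 'coprocessor' => '|org.apache.hadoop.hbase.coprocessor.Export||'
----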
+
+
 NOTE: To see usage instructions, run the command with no options. Available options include
 specifying column families and applying filters during the export.
 
@@ -577,7 +621,7 @@ There are two ways to invoke this utility, with explicit classname and via the d
 
 .Explicit Classname
 ----
-$ bin/hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles <hdfs://storefileoutput> <tablename>
+$ bin/hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles <hdfs://storefileoutput> <tablename>
 ----
 
 .Driver
@@ -620,7 +664,7 @@ To NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in
 [[rowcounter]]
 === RowCounter and CellCounter
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter]        is a mapreduce job to count all the rows of a table.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter]        is a mapreduce job to count all the rows of a table.
 This is a good utility to use as a sanity check to ensure that HBase can read all the blocks of a table if there are any concerns of metadata inconsistency.
 It will run the mapreduce all in a single process but it will run faster if you have a MapReduce cluster in place for it to exploit. It is also possible to limit
 the time range of data to be scanned by using the `--starttime=[starttime]` and `--endtime=[endtime]` flags.
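For instance, a time-bounded count might look like the following sketch; the epoch-millisecond timestamps are illustrative placeholders, not values from this document:

[source,bash]
----
# Count only the rows whose cells fall inside the given time range (epoch milliseconds)
$ bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter usertable \
  --starttime=1483228800000 --endtime=1514764800000
----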
@@ -633,7 +677,7 @@ RowCounter only counts one version per cell.
 
 Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration.
 
-HBase ships another diagnostic mapreduce job called link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html[CellCounter].
+HBase ships another diagnostic mapreduce job called link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html[CellCounter].
 Like RowCounter, it gathers more fine-grained statistics about your table.
 The statistics gathered by CellCounter are more fine-grained and include:
 
@@ -666,7 +710,7 @@ See link:https://issues.apache.org/jira/browse/HBASE-4391[HBASE-4391 Add ability
 === Offline Compaction Tool
 
 See the usage for the
-link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html[CompactionTool].
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html[CompactionTool].
 Run it like:
 
 [source, bash]
@@ -722,7 +766,7 @@ The LoadTestTool has received many updates in recent HBase releases, including s
 [[ops.regionmgt.majorcompact]]
 === Major Compaction
 
-Major compactions can be requested via the HBase shell or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact%28java.lang.String%29[Admin.majorCompact].
+Major compactions can be requested via the HBase shell or link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact-org.apache.hadoop.hbase.TableName-[Admin.majorCompact].
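For example, from the HBase shell (a minimal sketch; `my_table` is a placeholder table name):

[source,bash]
----
# Request a major compaction of an entire table
hbase> major_compact 'my_table'

# Or of a single region, by passing a region name
hbase> major_compact 'REGION_NAME'
----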
 
 Note: major compactions do NOT do region merges.
 See <<compaction,compaction>> for more information about compactions.
@@ -739,7 +783,7 @@ $ bin/hbase org.apache.hadoop.hbase.util.Merge <tablename> <region1> <region2>
 
 If you feel you have too many regions and want to consolidate them, Merge is the utility you need.
 Merge must be run when the cluster is down.
-See the link:http://ofps.oreilly.com/titles/9781449396107/performance.html[O'Reilly HBase
+See the link:https://web.archive.org/web/20111231002503/http://ofps.oreilly.com/titles/9781449396107/performance.html[O'Reilly HBase
           Book] for an example of usage.
 
 You will need to pass 3 parameters to this application.
@@ -868,7 +912,7 @@ But usually disks do the "John Wayne" -- i.e.
 take a while to go down spewing errors in _dmesg_ -- or for some reason, run much slower than their companions.
 In this case you want to decommission the disk.
 You have two options.
-You can link:http://wiki.apache.org/hadoop/FAQ#I_want_to_make_a_large_cluster_smaller_by_taking_out_a_bunch_of_nodes_simultaneously._How_can_this_be_done.3F[decommission
+You can link:https://wiki.apache.org/hadoop/FAQ#I_want_to_make_a_large_cluster_smaller_by_taking_out_a_bunch_of_nodes_simultaneously._How_can_this_be_done.3F[decommission
             the datanode] or, less disruptive in that only the bad disks data will be rereplicated, can stop the datanode, unmount the bad volume (You can't umount a volume while the datanode is using it), and then restart the datanode (presuming you have set dfs.datanode.failed.volumes.tolerated > 0). The regionserver will throw some errors in its logs as it recalibrates where to get its data from -- it will likely roll its WAL log too -- but in general but for some latency spikes, it should keep on chugging.
 
 .Short Circuit Reads
@@ -1006,7 +1050,7 @@ In this case, or if you are in a OLAP environment and require having locality, t
 [[hbase_metrics]]
 == HBase Metrics
 
-HBase emits metrics which adhere to the link:http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/metrics/package-summary.html[Hadoop metrics] API.
+HBase emits metrics which adhere to the link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/Metrics.html[Hadoop Metrics] API.
 Starting with HBase 0.95footnote:[The Metrics system was redone in
           HBase 0.96. See Migration
             to the New Metrics Hotness – Metrics2 by Elliot Clark for detail], HBase is configured to emit a default set of metrics with a default sampling period of every 10 seconds.
@@ -1021,7 +1065,7 @@ To configure metrics for a given region server, edit the _conf/hadoop-metrics2-h
 Restart the region server for the changes to take effect.
 
 To change the sampling rate for the default sink, edit the line beginning with `*.period`.
-To filter which metrics are emitted or to extend the metrics framework, see http://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/package-summary.html
+To filter which metrics are emitted or to extend the metrics framework, see https://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/package-summary.html
 
 .HBase Metrics and Ganglia
 [NOTE]
@@ -1029,7 +1073,7 @@ To filter which metrics are emitted or to extend the metrics framework, see http
 By default, HBase emits a large number of metrics per region server.
 Ganglia may have difficulty processing all these metrics.
 Consider increasing the capacity of the Ganglia server or reducing the number of metrics emitted by HBase.
-See link:http://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/package-summary.html#filtering[Metrics Filtering].
+See link:https://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/package-summary.html#filtering[Metrics Filtering].
 ====
 
 === Disabling Metrics
@@ -1287,7 +1331,7 @@ Have a look in the Web UI.
 == Cluster Replication
 
 NOTE: This information was previously available at
-link:http://hbase.apache.org#replication[Cluster Replication].
+link:https://hbase.apache.org/0.94/replication.html[Cluster Replication].
 
 HBase provides a cluster replication mechanism which allows you to keep one cluster's state synchronized with that of another cluster, using the write-ahead log (WAL) of the source cluster to propagate the changes.
 Some use cases for cluster replication include:
@@ -1323,9 +1367,11 @@ If a slave cluster does run out of room, or is inaccessible for other reasons, i
 .Consistency Across Replicated Clusters
 [WARNING]
 ====
-How your application builds on top of the HBase API matters when replication is in play. HBase's replication system provides at-least-once delivery of client edits for an enabled column family to each configured destination cluster. In the event of failure to reach a given destination, the replication system will retry sending edits in a way that might repeat a given message. Further more, there is not a guaranteed order of delivery for client edits. In the event of a RegionServer failing, recovery of the replication queue happens independent of recovery of the individual regions that server was previously handling. This means that it is possible for the not-yet-replicated edits to be serviced by a RegionServer that is currently slower to replicate than the one that handles edits from after the failure.
+How your application builds on top of the HBase API matters when replication is in play. HBase's replication system provides at-least-once delivery of client edits for an enabled column family to each configured destination cluster. In the event of failure to reach a given destination, the replication system will retry sending edits in a way that might repeat a given message. HBase provides two modes of replication: the original replication and serial replication. With the original mode, there is no guaranteed order of delivery for client edits. In the event of a RegionServer failing, recovery of the replication queue happens independently of recovery of the individual regions that server was previously handling. This means that it is possible for not-yet-replicated edits to be serviced by a RegionServer that is currently slower to replicate than the one that handles edits from after the failure.
 
 The combination of these two properties (at-least-once delivery and the lack of message ordering) means that some destination clusters may end up in a different state if your application makes use of operations that are not idempotent, e.g. Increments.
+
+To solve this problem, HBase now supports serial replication, which sends edits to the destination cluster in the same order in which they were received from clients.
 ====
 
 .Terminology Changes
@@ -1351,6 +1397,7 @@ image::hbase_replication_diagram.jpg[]
 HBase replication borrows many concepts from the [firstterm]_statement-based replication_ design used by MySQL.
 Instead of SQL statements, entire WALEdits (consisting of multiple cell inserts coming from Put and Delete operations on the clients) are replicated in order to maintain atomicity.
 
+[[hbase.replication.management]]
 === Managing and Configuring Cluster Replication
 .Cluster Configuration Overview
 
@@ -1365,11 +1412,15 @@ Instead of SQL statements, entire WALEdits (consisting of multiple cell inserts
 LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
 ----
 
+.Serial Replication Configuration
+See <<Serial Replication,Serial Replication>>
+
 .Cluster Management Commands
 add_peer <ID> <CLUSTER_KEY>::
   Adds a replication relationship between two clusters. +
   * ID -- a unique string, which must not contain a hyphen.
   * CLUSTER_KEY: composed using the following template, with appropriate place-holders: `hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent`
+  * STATE(optional): ENABLED or DISABLED, default value is ENABLED
 list_peers:: list all replication relationships known by this cluster
 enable_peer <ID>::
   Enable a previously-disabled replication relationship
@@ -1385,6 +1436,40 @@ enable_table_replication <TABLE_NAME>::
 disable_table_replication <TABLE_NAME>::
   Disable the table replication switch for all its column families.
 
+=== Serial Replication
+
+Note: this feature is introduced in HBase 1.5
+
+.Function of serial replication
+
+Serial replication ensures that logs are pushed to the destination cluster in the same order in which they arrive at the source cluster.
+
+.Why is serial replication needed?
+In HBase replication, we push mutations to the destination cluster by reading the WAL in each region server. We keep a queue of WAL files so we can read them in order of creation time. However, when a region move or RS failure occurs in the source cluster, the WAL entries that were not pushed before the region move or RS failure will be pushed by the original RS (for a region move) or by another RS which takes over the remaining WAL of the dead RS (for an RS failure), while the new entries for the same region(s) will be pushed by the RS which now serves the region(s). These servers push the WAL entries of the same region concurrently, without coordination.
+
+This can lead to data inconsistency between the source and destination clusters:
+
+1. A put and then a delete are written to the source cluster.
+
+2. Due to a region move or RS failure, they are pushed to the peer cluster by different replication-source threads.
+
+3. If the delete is pushed to the peer cluster before the put, and a flush and major compaction occur in the peer cluster before the put arrives, the delete is collected while the put remains in the peer cluster. In the source cluster, however, the put is masked by the delete, hence the data inconsistency between the source and destination clusters.
+
+
+.Serial replication configuration
+
+. Set REPLICATION_SCOPE=>2 on the column family which is to be replicated serially when creating tables.
+
+ REPLICATION_SCOPE is a column-family-level attribute. Its value can be 0, 1 or 2. Value 0 means replication is disabled, 1 means replication is enabled but log order is not guaranteed, and 2 means serial replication is enabled. A shell sketch is shown after this list.
+
+. This feature relies on zk-less assignment and conflicts with distributed log replay, so users must set hbase.assignment.usezk=false and hbase.master.distributed.log.replay=false to use this feature. (Note that distributed log replay is deprecated and has already been removed in 2.0.)
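A minimal shell sketch of the column-family setting from step 1; `my_table` and `cf1` are placeholder names:

[source,bash]
----
# Create a table whose family 'cf1' is replicated serially (REPLICATION_SCOPE => 2)
hbase> create 'my_table', {NAME => 'cf1', REPLICATION_SCOPE => 2}

# Or switch an existing family to serial replication
hbase> alter 'my_table', {NAME => 'cf1', REPLICATION_SCOPE => 2}
----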
+
+.Limitations in serial replication
+
+Currently, logs on one RS are read and pushed to one peer in a single thread, so if one log entry has not been pushed, all log entries after it are blocked. A WAL file may contain edits from different tables; if one of those tables (or one of its column families) has REPLICATION_SCOPE 2 and is blocked, then all edits are blocked, even for tables that do not need serial replication. If you want to prevent this, you need to split these tables/column families into different peers.
+
+More details about serial replication can be found in link:https://issues.apache.org/jira/browse/HBASE-9465[HBASE-9465].
+
 === Verifying Replicated Data
 
 The `VerifyReplication` MapReduce job, which is included in HBase, performs a systematic comparison of replicated data between two different clusters. Run the VerifyReplication job on the master cluster, supplying it with the peer ID and table name to use for validation. You can limit the verification further by specifying a time range or specific families. The job's short name is `verifyrep`. To run the job, use a command like the following:
@@ -1414,7 +1499,7 @@ A single WAL edit goes through several steps in order to be replicated to a slav
 . The edit is tagged with the master's UUID and added to a buffer.
   When the buffer is filled, or the reader reaches the end of the file, the buffer is sent to a random region server on the slave cluster.
 . The region server reads the edits sequentially and separates them into buffers, one buffer per table.
-  After all edits are read, each buffer is flushed using link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table], HBase's normal client.
+  After all edits are read, each buffer is flushed using link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table], HBase's normal client.
   The master's UUID and the UUIDs of slaves which have already consumed the data are preserved in the edits they are applied, in order to prevent replication loops.
 . In the master, the offset for the WAL that is currently being replicated is registered in ZooKeeper.
 
@@ -2049,7 +2134,7 @@ The act of copying these files creates new HDFS metadata, which is why a restore
 === Live Cluster Backup - Replication
 
 This approach assumes that there is a second cluster.
-See the HBase page on link:http://hbase.apache.org/book.html#replication[replication] for more information.
+See the HBase page on link:https://hbase.apache.org/book.html#_cluster_replication[replication] for more information.
 
 [[ops.backup.live.copytable]]
 === Live Cluster Backup - CopyTable
@@ -2258,7 +2343,7 @@ as in <<snapshots_s3>>.
 - You must be using HBase 1.2 or higher with Hadoop 2.7.1 or
   higher. No version of HBase supports Hadoop 2.7.0.
 - Your hosts must be configured to be aware of the Azure blob storage filesystem.
-  See http://hadoop.apache.org/docs/r2.7.1/hadoop-azure/index.html.
+  See https://hadoop.apache.org/docs/r2.7.1/hadoop-azure/index.html.
 
 After you meet the prerequisites, follow the instructions
 in <<snapshots_s3>>, replacing the protocol specifier with `wasb://` or `wasbs://`.
@@ -2321,7 +2406,7 @@ See <<gcpause,gcpause>>, <<trouble.log.gc,trouble.log.gc>> and elsewhere (TODO:
 Generally less regions makes for a smoother running cluster (you can always manually split the big regions later (if necessary) to spread the data, or request load, over the cluster); 20-200 regions per RS is a reasonable range.
 The number of regions cannot be configured directly (unless you go for fully <<disable.splitting,disable.splitting>>); adjust the region size to achieve the target region size given table size.
 
-When configuring regions for multiple tables, note that most region settings can be set on a per-table basis via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor], as well as shell commands.
+When configuring regions for multiple tables, note that most region settings can be set on a per-table basis via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor], as well as shell commands.
 These settings will override the ones in `hbase-site.xml`.
 That is useful if your tables have different workloads/use cases.
 
@@ -2478,7 +2563,7 @@ void rename(Admin admin, String oldTableName, TableName newTableName) {
 RegionServer Grouping (A.K.A `rsgroup`) is an advanced feature for
 partitioning regionservers into distinctive groups for strict isolation. It
 should only be used by users who are sophisticated enough to understand the
-full implications and have a sufficient background in managing HBase clusters. 
+full implications and have a sufficient background in managing HBase clusters.
 It was developed by Yahoo! and they run it at scale on their large grid cluster.
 See link:http://www.slideshare.net/HBaseCon/keynote-apache-hbase-at-yahoo-scale[HBase at Yahoo! Scale].
 
@@ -2491,20 +2576,20 @@ rsgroup at a time. By default, all tables and regionservers belong to the
 APIs. A custom balancer implementation tracks assignments per rsgroup and makes
 sure to move regions to the relevant regionservers in that rsgroup. The rsgroup
 information is stored in a regular HBase table, and a zookeeper-based read-only
-cache is used at cluster bootstrap time. 
+cache is used at cluster bootstrap time.
 
-To enable, add the following to your hbase-site.xml and restart your Master: 
+To enable, add the following to your hbase-site.xml and restart your Master:
 
 [source,xml]
 ----
- <property> 
-   <name>hbase.coprocessor.master.classes</name> 
-   <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value> 
- </property> 
- <property> 
-   <name>hbase.master.loadbalancer.class</name> 
-   <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value> 
- </property> 
+ <property>
+   <name>hbase.coprocessor.master.classes</name>
+   <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
+ </property>
+ <property>
+   <name>hbase.master.loadbalancer.class</name>
+   <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
+ </property>
 ----
 
 Then use the shell _rsgroup_ commands to create and manipulate RegionServer
@@ -2514,7 +2599,7 @@ rsgroup commands available in the hbase shell type:
 [source, bash]
 ----
  hbase(main):008:0> help 'rsgroup'
- Took 0.5610 seconds 
+ Took 0.5610 seconds
 ----
 
 High level, you create a rsgroup that is other than the `default` group using
@@ -2531,8 +2616,8 @@ Here is example using a few of the rsgroup  commands. To add a group, do as foll
 
 [source, bash]
 ----
- hbase(main):008:0> add_rsgroup 'my_group' 
- Took 0.5610 seconds 
+ hbase(main):008:0> add_rsgroup 'my_group'
+ Took 0.5610 seconds
 ----
 
 
@@ -2556,11 +2641,11 @@ ERROR: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registere
 ====
 
 Add a server (specified by hostname + port) to the just-made group using the
-_move_servers_rsgroup_ command as follows: 
+_move_servers_rsgroup_ command as follows:
 
 [source, bash]
 ----
- hbase(main):010:0> move_servers_rsgroup 'my_group',['k.att.net:51129'] 
+ hbase(main):010:0> move_servers_rsgroup 'my_group',['k.att.net:51129']
 ----
 
 .Hostname and Port vs ServerName

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/other_info.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/other_info.adoc b/src/main/asciidoc/_chapters/other_info.adoc
index 8bcbe0f..f2dd1b8 100644
--- a/src/main/asciidoc/_chapters/other_info.adoc
+++ b/src/main/asciidoc/_chapters/other_info.adoc
@@ -32,16 +32,14 @@
 === HBase Videos
 
 .Introduction to HBase
-* link:http://www.cloudera.com/content/cloudera/en/resources/library/presentation/chicago_data_summit_apache_hbase_an_introduction_todd_lipcon.html[Introduction to HBase] by Todd Lipcon (Chicago Data Summit 2011).
-* link:http://www.cloudera.com/videos/intorduction-hbase-todd-lipcon[Introduction to HBase] by Todd Lipcon (2010).
-link:http://www.cloudera.com/videos/hadoop-world-2011-presentation-video-building-realtime-big-data-services-at-facebook-with-hadoop-and-hbase[Building Real Time Services at Facebook with HBase] by Jonathan Gray (Hadoop World 2011).
-
-link:http://www.cloudera.com/videos/hw10_video_how_stumbleupon_built_and_advertising_platform_using_hbase_and_hadoop[HBase and Hadoop, Mixing Real-Time and Batch Processing at StumbleUpon] by JD Cryans (Hadoop World 2010).
+* link:https://vimeo.com/23400732[Introduction to HBase] by Todd Lipcon (Chicago Data Summit 2011).
+* link:https://vimeo.com/26804675[Building Real Time Services at Facebook with HBase] by Jonathan Gray (Berlin buzzwords 2011).
+* link:http://www.cloudera.com/videos/hw10_video_how_stumbleupon_built_and_advertising_platform_using_hbase_and_hadoop[The Multiple Uses Of HBase] by Jean-Daniel Cryans (Berlin buzzwords 2011).
 
 [[other.info.pres]]
 === HBase Presentations (Slides)
 
-link:http://www.cloudera.com/content/cloudera/en/resources/library/hadoopworld/hadoop-world-2011-presentation-video-advanced-hbase-schema-design.html[Advanced HBase Schema Design] by Lars George (Hadoop World 2011).
+link:https://www.slideshare.net/cloudera/hadoop-world-2011-advanced-hbase-schema-design-lars-george-cloudera[Advanced HBase Schema Design] by Lars George (Hadoop World 2011).
 
 link:http://www.slideshare.net/cloudera/chicago-data-summit-apache-hbase-an-introduction[Introduction to HBase] by Todd Lipcon (Chicago Data Summit 2011).
 
@@ -61,9 +59,7 @@ link:http://ianvarley.com/UT/MR/Varley_MastersReport_Full_2009-08-07.pdf[No Rela
 
 link:https://blog.cloudera.com/blog/category/hbase/[Cloudera's HBase Blog] has a lot of links to useful HBase information.
 
-* link:https://blog.cloudera.com/blog/2010/04/cap-confusion-problems-with-partition-tolerance/[CAP Confusion] is a relevant entry for background information on distributed storage systems.
-
-link:http://wiki.apache.org/hadoop/HBase/HBasePresentations[HBase Wiki] has a page with a number of presentations.
+link:https://blog.cloudera.com/blog/2010/04/cap-confusion-problems-with-partition-tolerance/[CAP Confusion] is a relevant entry for background information on distributed storage systems.
 
 link:http://refcardz.dzone.com/refcardz/hbase[HBase RefCard] from DZone.
 


[6/6] hbase git commit: update changes for 1.1.13

Posted by nd...@apache.org.
update changes for 1.1.13


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c64bf8a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c64bf8a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c64bf8a9

Branch: refs/heads/branch-1.1
Commit: c64bf8a9f35352cd504f2b8f4b02f9148cf45ab6
Parents: 2e9a55b
Author: Nick Dimiduk <nd...@apache.org>
Authored: Thu Nov 30 20:41:12 2017 -0800
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Thu Nov 30 20:41:12 2017 -0800

----------------------------------------------------------------------
 CHANGES.txt | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c64bf8a9/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index d3f9013..bd11e47 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -3,6 +3,97 @@ HBase Change Log
 
 
 
+
+Release Notes - HBase - Version 1.1.13 11/30/2017
+
+** Sub-task
+    * [HBASE-18867] - maven enforcer plugin needs update to work with jdk9
+    * [HBASE-18957] - add test that confirms 2 FamilyFilters in a FilterList using MUST_PASS_ONE operator will return results that match either of the FamilyFilters and revert as needed to make it pass.
+    * [HBASE-18980] - Address issues found by error-prone in hbase-hadoop2-compat
+    * [HBASE-19070] - temporarily make the mvnsite nightly test non-voting.
+
+
+
+
+
+
+
+** Bug
+    * [HBASE-14745] - Shade the last few dependencies in hbase-shaded-client
+    * [HBASE-18125] - HBase shell disregards spaces at the end of a split key in a split file
+    * [HBASE-18438] - Precommit doesn't warn about unused imports
+    * [HBASE-18505] - Our build/yetus personality will run tests on individual modules and then on all (i.e. 'root'). Should do one or other
+    * [HBASE-18577] - shaded client includes several non-relocated third party dependencies
+    * [HBASE-18665] - ReversedScannerCallable invokes getRegionLocations incorrectly
+    * [HBASE-18679] - YARN may null Counters object and cause an NPE in ITBLL
+    * [HBASE-18818] - TestConnectionImplemenation fails
+    * [HBASE-18934] - precommit on branch-1 isn't supposed to run against hadoop 3
+    * [HBASE-18940] - branch-2 (and probably others) fail check of generated source artifact
+    * [HBASE-18998] - processor.getRowsToLock() always assumes there is some row being locked
+    * [HBASE-19020] - TestXmlParsing exception checking relies on a particular xml implementation without declaring it.
+    * [HBASE-19030] - nightly runs should attempt to log test results after archiving
+    * [HBASE-19038] - precommit mvn install should run from root on patch
+    * [HBASE-19039] - refactor shadedjars test to only run on java changes.
+    * [HBASE-19055] - Backport HBASE-19042 to other active branches
+    * [HBASE-19058] - The wget isn't installed in building docker image
+    * [HBASE-19060] - "Hadoop check" test is running all the time instead of just when changes to java
+    * [HBASE-19061] - enforcer NPE on hbase-shaded-invariants
+    * [HBASE-19066] - Correct the directory of openjdk-8 for jenkins
+    * [HBASE-19124] - Move HBase-Nightly source artifact creation test from JenkinsFile to a script in dev-support
+    * [HBASE-19137] - Nightly test should make junit reports optional rather than attempt archive after reporting.
+    * [HBASE-19184] - clean up nightly source artifact test to match expectations from switch to git-archive
+    * [HBASE-19223] - Remove references to Date Tiered compaction from branch-1.2 and branch-1.1 ref guide
+    * [HBASE-19229] - Nightly script to check source artifact should not do a destructive git operation without opt-in
+    * [HBASE-19249] - test for "hbase antipatterns" should check _count_ of occurance rather than text of
+    * [HBASE-19393] - HTTP 413 FULL head while accessing HBase UI using SSL. 
+
+
+
+
+
+
+
+
+
+
+** Improvement
+    * [HBASE-18631] - Allow configuration of ChaosMonkey properties via hbase-site
+    * [HBASE-18675] - Making {max,min}SessionTimeout configurable for MiniZooKeeperCluster
+    * [HBASE-19052] - FixedFileTrailer should recognize CellComparatorImpl class in branch-1.x
+    * [HBASE-19140] - hbase-cleanup.sh uses deprecated call to remove files in hdfs
+    * [HBASE-19227] - Nightly jobs should archive JVM dumpstream files
+    * [HBASE-19228] - nightly job should gather machine stats.
+
+
+
+** New Feature
+    * [HBASE-19189] - Ad-hoc test job for running a subset of tests lots of times
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+** Task
+    * [HBASE-16459] - Remove unused hbase shell --format option
+    * [HBASE-18833] - Ensure precommit personality is up to date on all active branches
+    * [HBASE-19097] - update testing to use Apache Yetus Test Patch version 0.6.0
+
+
+
+
+
 Release Notes - HBase - Version 1.1.12 08/12/2017
 
 ** Sub-task


[2/6] hbase git commit: updating docs from master

Posted by nd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/performance.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/performance.adoc b/src/main/asciidoc/_chapters/performance.adoc
index 114754f..c917646 100644
--- a/src/main/asciidoc/_chapters/performance.adoc
+++ b/src/main/asciidoc/_chapters/performance.adoc
@@ -320,7 +320,7 @@ See also <<perf.compression.however>> for compression caveats.
 [[schema.regionsize]]
 === Table RegionSize
 
-The regionsize can be set on a per-table basis via `setFileSize` on link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor] in the event where certain tables require different regionsizes than the configured default regionsize.
+The regionsize can be set on a per-table basis via `setFileSize` on link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor] in the event where certain tables require different regionsizes than the configured default regionsize.
 
 See <<ops.capacity.regions>> for more information.
 
@@ -372,7 +372,7 @@ Bloom filters are enabled on a Column Family.
 You can do this by using the setBloomFilterType method of HColumnDescriptor or using the HBase API.
 Valid values are `NONE`, `ROW` (default), or `ROWCOL`.
 See <<bloom.filters.when>> for more information on `ROW` versus `ROWCOL`.
-See also the API documentation for link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
+See also the API documentation for link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
 
 The following example creates a table and enables a ROWCOL Bloom filter on the `colfam1` column family.
 
@@ -431,7 +431,7 @@ The blocksize can be configured for each ColumnFamily in a table, and defaults t
 Larger cell values require larger blocksizes.
 There is an inverse relationship between blocksize and the resulting StoreFile indexes (i.e., if the blocksize is doubled then the resulting indexes should be roughly halved).
 
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] and <<store>>for more information.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] and <<store>> for more information.
 
 [[cf.in.memory]]
 === In-Memory ColumnFamilies
@@ -440,7 +440,7 @@ ColumnFamilies can optionally be defined as in-memory.
 Data is still persisted to disk, just like any other ColumnFamily.
 In-memory blocks have the highest priority in the <<block.cache>>, but it is not a guarantee that the entire table will be in memory.
 
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] for more information.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] for more information.
 
 [[perf.compression]]
 === Compression
@@ -549,19 +549,9 @@ If deferred log flush is used, WAL edits are kept in memory until the flush peri
 The benefit is aggregated and asynchronous `WAL`- writes, but the potential downside is that if the RegionServer goes down the yet-to-be-flushed edits are lost.
 This is safer, however, than not using WAL at all with Puts.
 
-Deferred log flush can be configured on tables via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor].
+Deferred log flush can be configured on tables via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor].
 The default value of `hbase.regionserver.optionallogflushinterval` is 1000ms.
 
-[[perf.hbase.client.autoflush]]
-=== HBase Client: AutoFlush
-
-When performing a lot of Puts, make sure that setAutoFlush is set to false on your link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table] instance.
-Otherwise, the Puts will be sent one at a time to the RegionServer.
-Puts added via `table.add(Put)` and `table.add( <List> Put)` wind up in the same write buffer.
-If `autoFlush = false`, these messages are not sent until the write-buffer is filled.
-To explicitly flush the messages, call `flushCommits`.
-Calling `close` on the `Table` instance will invoke `flushCommits`.
-
 [[perf.hbase.client.putwal]]
 === HBase Client: Turn off WAL on Puts
 
@@ -584,7 +574,7 @@ There is a utility `HTableUtil` currently on MASTER that does this, but you can
 [[perf.hbase.write.mr.reducer]]
 === MapReduce: Skip The Reducer
 
-When writing a lot of data to an HBase table from a MR job (e.g., with link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat]), and specifically where Puts are being emitted from the Mapper, skip the Reducer step.
+When writing a lot of data to an HBase table from a MR job (e.g., with link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat]), and specifically where Puts are being emitted from the Mapper, skip the Reducer step.
 When a Reducer step is used, all of the output (Puts) from the Mapper will get spooled to disk, then sorted/shuffled to other Reducers that will most likely be off-node.
 It's far more efficient to just write directly to HBase.
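
As a hedged sketch of the map-only pattern described above (not part of this commit), the job wiring might look roughly like the following; `MyTableWritingMapper` is a hypothetical mapper that emits `Put` objects keyed by `ImmutableBytesWritable`, and the table name is illustrative:

[source,java]
----
Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "write-directly-to-hbase");
job.setJarByClass(MyTableWritingMapper.class);
job.setMapperClass(MyTableWritingMapper.class);   // hypothetical mapper emitting Puts
// Wires TableOutputFormat for the target table; passing a null reducer class
// plus zero reduce tasks sends the Puts straight from the mappers to HBase.
TableMapReduceUtil.initTableReducerJob("mytable", null, job);
job.setNumReduceTasks(0);
boolean success = job.waitForCompletion(true);
----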
 
@@ -597,7 +587,7 @@ If all your data is being written to one region at a time, then re-read the sect
 
 Also, if you are pre-splitting regions and all your data is _still_ winding up in a single region even though your keys aren't monotonically increasing, confirm that your keyspace actually works with the split strategy.
 There are a variety of reasons that regions may appear "well split" but won't work with your data.
-As the HBase client communicates directly with the RegionServers, this can be obtained via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#getRegionLocation(byte%5B%5D)[Table.getRegionLocation].
+As the HBase client communicates directly with the RegionServers, this can be obtained via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/RegionLocator.html#getRegionLocation-byte:A-[RegionLocator.getRegionLocation].
 
 See <<precreate.regions>>, as well as <<perf.configurations>>
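
For illustration (not part of this commit), a small sketch of checking which region and host a given row key lands on via the `RegionLocator` API linked above; it assumes an open `Connection` named `connection`, and the table and row key are hypothetical:

[source,java]
----
try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("mytable"))) {
  HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("some-row-key"));
  System.out.println("row lands in " + location.getRegionInfo().getRegionNameAsString()
      + " on " + location.getHostname());
}
----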
 
@@ -610,7 +600,7 @@ For example, here is a good general thread on what to look at addressing read-ti
 [[perf.hbase.client.caching]]
 === Scan Caching
 
-If HBase is used as an input source for a MapReduce job, for example, make sure that the input link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] instance to the MapReduce job has `setCaching` set to something greater than the default (which is 1). Using the default value means that the map-task will make call back to the region-server for every record processed.
+If HBase is used as an input source for a MapReduce job, for example, make sure that the input link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] instance to the MapReduce job has `setCaching` set to something greater than the default (which is 1). Using the default value means that the map-task will make call back to the region-server for every record processed.
 Setting this value to 500, for example, will transfer 500 rows at a time to the client to be processed.
 There is a cost/benefit to have the cache value be large because it costs more in memory for both client and RegionServer, so bigger isn't always better.
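
A two-line sketch of the setting discussed above (illustrative value only, not part of this commit):

[source,java]
----
Scan scan = new Scan();
scan.setCaching(500);  // fetch 500 rows per RPC instead of the default of 1
----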
 
@@ -659,7 +649,7 @@ For MapReduce jobs that use HBase tables as a source, if there a pattern where t
 === Close ResultScanners
 
 This isn't so much about improving performance but rather _avoiding_ performance problems.
-If you forget to close link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/ResultScanner.html[ResultScanners] you can cause problems on the RegionServers.
+If you forget to close link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/ResultScanner.html[ResultScanners] you can cause problems on the RegionServers.
 Always have ResultScanner processing enclosed in try/catch blocks.
 
 [source,java]
@@ -679,7 +669,7 @@ table.close();
 [[perf.hbase.client.blockcache]]
 === Block Cache
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] instances can be set to use the block cache in the RegionServer via the `setCacheBlocks` method.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] instances can be set to use the block cache in the RegionServer via the `setCacheBlocks` method.
 For input Scans to MapReduce jobs, this should be `false`.
 For frequently accessed rows, it is advisable to use the block cache.
 
@@ -689,8 +679,8 @@ See <<offheap.blockcache>>
 [[perf.hbase.client.rowkeyonly]]
 === Optimal Loading of Row Keys
 
-When performing a table link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[scan] where only the row keys are needed (no families, qualifiers, values or timestamps), add a FilterList with a `MUST_PASS_ALL` operator to the scanner using `setFilter`.
-The filter list should include both a link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.html[FirstKeyOnlyFilter] and a link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.html[KeyOnlyFilter].
+When performing a table link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[scan] where only the row keys are needed (no families, qualifiers, values or timestamps), add a FilterList with a `MUST_PASS_ALL` operator to the scanner using `setFilter`.
+The filter list should include both a link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.html[FirstKeyOnlyFilter] and a link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.html[KeyOnlyFilter].
 Using this filter combination will result in a worst case scenario of a RegionServer reading a single value from disk and minimal network traffic to the client for a single row.
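
A hedged sketch of that filter combination (not part of this commit); it assumes an open `Table` named `table`:

[source,java]
----
Scan scan = new Scan();
FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
filters.addFilter(new FirstKeyOnlyFilter());  // only the first cell of each row
filters.addFilter(new KeyOnlyFilter());       // strip the values, keep the keys
scan.setFilter(filters);
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    byte[] rowKey = result.getRow();
    // process rowKey ...
  }
}
----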
 
 [[perf.hbase.read.dist]]
@@ -709,7 +699,7 @@ Enabling Bloom Filters can save your having to go to disk and can help improve r
 link:http://en.wikipedia.org/wiki/Bloom_filter[Bloom filters] were developed over in link:https://issues.apache.org/jira/browse/HBASE-1200[HBase-1200 Add bloomfilters].
 For description of the development process -- why static blooms rather than dynamic -- and for an overview of the unique properties that pertain to blooms in HBase, as well as possible future directions, see the _Development Process_ section of the document link:https://issues.apache.org/jira/secure/attachment/12444007/Bloom_Filters_in_HBase.pdf[BloomFilters in HBase] attached to link:https://issues.apache.org/jira/browse/HBASE-1200[HBASE-1200].
 The bloom filters described here are actually version two of blooms in HBase.
-In versions up to 0.19.x, HBase had a dynamic bloom option based on work done by the link:http://www.one-lab.org/[European Commission One-Lab Project 034819].
+In versions up to 0.19.x, HBase had a dynamic bloom option based on work done by the link:http://www.onelab.org[European Commission One-Lab Project 034819].
 The core of the HBase bloom work was later pulled up into Hadoop to implement org.apache.hadoop.io.BloomMapFile.
 Version 1 of HBase blooms never worked that well.
 Version 2 is a rewrite from scratch though again it starts with the one-lab work.
@@ -826,7 +816,7 @@ In this case, special care must be taken to regularly perform major compactions
 As is documented in <<datamodel>>, marking rows as deleted creates additional StoreFiles which then need to be processed on reads.
 Tombstones only get cleaned up with major compactions.
 
-See also <<compaction>> and link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact%28java.lang.String%29[Admin.majorCompact].
+See also <<compaction>> and link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact-org.apache.hadoop.hbase.TableName-[Admin.majorCompact].
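
For illustration (not part of this commit), triggering a major compaction of a hypothetical table from the Java API; the call is asynchronous and returns immediately, and an open `Connection` named `connection` is assumed:

[source,java]
----
try (Admin admin = connection.getAdmin()) {
  admin.majorCompact(TableName.valueOf("mytable"));
}
----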
 
 [[perf.deleting.rpc]]
 === Delete RPC Behavior
@@ -835,8 +825,7 @@ Be aware that `Table.delete(Delete)` doesn't use the writeBuffer.
 It will execute an RegionServer RPC with each invocation.
 For a large number of deletes, consider `Table.delete(List)`.
 
-See
-+++<a href="http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#delete%28org.apache.hadoop.hbase.client.Delete%29">hbase.client.Delete</a>+++.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#delete-org.apache.hadoop.hbase.client.Delete-[hbase.client.Delete].
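
A minimal sketch of batching deletes (not part of this commit); `rowsToRemove` is a hypothetical collection of row keys and `table` an open `Table`:

[source,java]
----
List<Delete> deletes = new ArrayList<>();
for (byte[] row : rowsToRemove) {
  deletes.add(new Delete(row));
}
table.delete(deletes);  // one batched call rather than one RPC per Delete
----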
 
 [[perf.hdfs]]
 == HDFS

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/preface.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/preface.adoc b/src/main/asciidoc/_chapters/preface.adoc
index ed2ca7a..280f2d8 100644
--- a/src/main/asciidoc/_chapters/preface.adoc
+++ b/src/main/asciidoc/_chapters/preface.adoc
@@ -27,11 +27,11 @@
 :icons: font
 :experimental:
 
-This is the official reference guide for the link:http://hbase.apache.org/[HBase] version it ships with.
+This is the official reference guide for the link:https://hbase.apache.org/[HBase] version it ships with.
 
 Herein you will find either the definitive documentation on an HBase topic as of its
 standing when the referenced HBase version shipped, or it will point to the location
-in link:http://hbase.apache.org/apidocs/index.html[Javadoc] or
+in link:https://hbase.apache.org/apidocs/index.html[Javadoc] or
 link:https://issues.apache.org/jira/browse/HBASE[JIRA] where the pertinent information can be found.
 
 .About This Guide

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/protobuf.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/protobuf.adoc b/src/main/asciidoc/_chapters/protobuf.adoc
index 8c73dd0..ad7e378 100644
--- a/src/main/asciidoc/_chapters/protobuf.adoc
+++ b/src/main/asciidoc/_chapters/protobuf.adoc
@@ -29,7 +29,7 @@
 
 
 == Protobuf
-HBase uses Google's link:http://protobuf.protobufs[protobufs] wherever
+HBase uses Google's link:https://developers.google.com/protocol-buffers/[protobufs] wherever
 it persists metadata -- in the tail of hfiles or Cells written by
 HBase into the system hbase:meta table or when HBase writes znodes
 to zookeeper, etc. -- and when it passes objects over the wire making

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/rpc.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/rpc.adoc b/src/main/asciidoc/_chapters/rpc.adoc
index 1d363eb..fbfba6c 100644
--- a/src/main/asciidoc/_chapters/rpc.adoc
+++ b/src/main/asciidoc/_chapters/rpc.adoc
@@ -28,7 +28,7 @@
 :icons: font
 :experimental:
 
-In 0.95, all client/server communication is done with link:https://developers.google.com/protocol-buffers/[protobuf'ed] Messages rather than with link:http://hadoop.apache.org/docs/current/api/org/apache/hadoop/io/Writable.html[Hadoop
+In 0.95, all client/server communication is done with link:https://developers.google.com/protocol-buffers/[protobuf'ed] Messages rather than with link:https://hadoop.apache.org/docs/current/api/org/apache/hadoop/io/Writable.html[Hadoop
             Writables].
 Our RPC wire format therefore changes.
 This document describes the client/server request/response protocol and our new RPC wire-format.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/schema_design.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/schema_design.adoc b/src/main/asciidoc/_chapters/schema_design.adoc
index cef05f2..4cd7656 100644
--- a/src/main/asciidoc/_chapters/schema_design.adoc
+++ b/src/main/asciidoc/_chapters/schema_design.adoc
@@ -47,7 +47,7 @@ See also Robert Yokota's link:https://blogs.apache.org/hbase/entry/hbase-applica
 [[schema.creation]]
 ==  Schema Creation
 
-HBase schemas can be created or updated using the <<shell>> or by using link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html[Admin] in the Java API.
+HBase schemas can be created or updated using the <<shell>> or by using link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html[Admin] in the Java API.
 
 Tables must be disabled when making ColumnFamily modifications, for example:
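
For illustration only (not part of this commit), a hedged Java sketch of that disable/alter/enable cycle using the `Admin` API named above; the table and column family names are hypothetical:

[source,java]
----
Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  TableName table = TableName.valueOf("mytable");
  admin.disableTable(table);
  admin.addColumn(table, new HColumnDescriptor("cf_new"));         // add a new ColumnFamily
  admin.modifyColumn(table, new HColumnDescriptor("cf_existing")); // modify an existing one
  admin.enableTable(table);
}
----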
 
@@ -223,7 +223,7 @@ You could also optimize things so that certain pairs of keys were always in the
 A third common trick for preventing hotspotting is to reverse a fixed-width or numeric row key so that the part that changes the most often (the least significant digit) is first.
 This effectively randomizes row keys, but sacrifices row ordering properties.
 
-See https://communities.intel.com/community/itpeernetwork/datastack/blog/2013/11/10/discussion-on-designing-hbase-tables, and link:http://phoenix.apache.org/salted.html[article on Salted Tables] from the Phoenix project, and the discussion in the comments of link:https://issues.apache.org/jira/browse/HBASE-11682[HBASE-11682] for more information about avoiding hotspotting.
+See https://communities.intel.com/community/itpeernetwork/datastack/blog/2013/11/10/discussion-on-designing-hbase-tables, and link:https://phoenix.apache.org/salted.html[article on Salted Tables] from the Phoenix project, and the discussion in the comments of link:https://issues.apache.org/jira/browse/HBASE-11682[HBASE-11682] for more information about avoiding hotspotting.
 
 [[timeseries]]
 ===  Monotonically Increasing Row Keys/Timeseries Data
@@ -338,7 +338,7 @@ This is the main trade-off.
 ====
 link:https://issues.apache.org/jira/browse/HBASE-4811[HBASE-4811] implements an API to scan a table or a range within a table in reverse, reducing the need to optimize your schema for forward or reverse scanning.
 This feature is available in HBase 0.98 and later.
-See https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setReversed%28boolean for more information.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setReversed-boolean-[Scan.setReversed()] for more information.
 ====
 
 A common problem in database processing is quickly finding the most recent version of a value.
@@ -433,7 +433,7 @@ public static byte[][] getHexSplits(String startKey, String endKey, int numRegio
 [[schema.versions.max]]
 === Maximum Number of Versions
 
-The maximum number of row versions to store is configured per column family via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
+The maximum number of row versions to store is configured per column family via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
 The default for max versions is 1.
 This is an important parameter because as described in <<datamodel>> section HBase does _not_ overwrite row values, but rather stores different values per row by time (and qualifier). Excess versions are removed during major compactions.
 The number of max versions may need to be increased or decreased depending on application needs.
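
For illustration (not part of this commit), setting a non-default maximum number of versions on a hypothetical column family:

[source,java]
----
HColumnDescriptor cf = new HColumnDescriptor("cf");
cf.setMaxVersions(3);  // keep up to 3 versions per cell instead of the default of 1
----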
@@ -443,14 +443,14 @@ It is not recommended setting the number of max versions to an exceedingly high
 [[schema.minversions]]
 ===  Minimum Number of Versions
 
-Like maximum number of row versions, the minimum number of row versions to keep is configured per column family via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
+Like maximum number of row versions, the minimum number of row versions to keep is configured per column family via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
 The default for min versions is 0, which means the feature is disabled.
 The minimum number of row versions parameter is used together with the time-to-live parameter and can be combined with the number of row versions parameter to allow configurations such as "keep the last T minutes worth of data, at most N versions, _but keep at least M versions around_" (where M is the value for minimum number of row versions, M<N). This parameter should only be set when time-to-live is enabled for a column family and must be less than the number of row versions.
 
 [[supported.datatypes]]
 ==  Supported Datatypes
 
-HBase supports a "bytes-in/bytes-out" interface via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Put.html[Put] and link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Result.html[Result], so anything that can be converted to an array of bytes can be stored as a value.
+HBase supports a "bytes-in/bytes-out" interface via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Put.html[Put] and link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Result.html[Result], so anything that can be converted to an array of bytes can be stored as a value.
 Input could be strings, numbers, complex objects, or even images as long as they can be rendered as bytes.
 
 There are practical limits to the size of values (e.g., storing 10-50MB objects in HBase would probably be too much to ask); search the mailing list for conversations on this topic.
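
A minimal sketch of the bytes-in/bytes-out round trip (not part of this commit); the table, family, and qualifier are hypothetical and `table` is assumed to be an open `Table`:

[source,java]
----
byte[] row = Bytes.toBytes("row1");
byte[] cf  = Bytes.toBytes("cf");
byte[] q   = Bytes.toBytes("q");

Put put = new Put(row);
put.addColumn(cf, q, Bytes.toBytes(42L));  // any value that can be rendered as bytes
table.put(put);

Result result = table.get(new Get(row));
long stored = Bytes.toLong(result.getValue(cf, q));
----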
@@ -459,7 +459,7 @@ Take that into consideration when making your design, as well as block size for
 
 === Counters
 
-One supported datatype that deserves special mention are "counters" (i.e., the ability to do atomic increments of numbers). See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#increment%28org.apache.hadoop.hbase.client.Increment%29[Increment] in `Table`.
+One supported datatype that deserves special mention is "counters" (i.e., the ability to do atomic increments of numbers). See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#increment%28org.apache.hadoop.hbase.client.Increment%29[Increment] in `Table`.
 
 Synchronization on counters is done on the RegionServer, not in the client.
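
For illustration (not part of this commit), a hedged sketch of an atomic increment against a hypothetical counter column; `table` is assumed to be an open `Table`:

[source,java]
----
// Single-column convenience form:
long newValue = table.incrementColumnValue(Bytes.toBytes("row1"),
    Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);

// Equivalent Increment-object form:
Increment increment = new Increment(Bytes.toBytes("row1"));
increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
Result result = table.increment(increment);
----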
 
@@ -479,7 +479,7 @@ Store files which contains only expired rows are deleted on minor compaction.
 Setting `hbase.store.delete.expired.storefile` to `false` disables this feature.
 Setting minimum number of versions to other than 0 also disables this.
 
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] for more information.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] for more information.
 
 Recent versions of HBase also support setting time to live on a per cell basis.
 See link:https://issues.apache.org/jira/browse/HBASE-10560[HBASE-10560] for more information.
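
For illustration (not part of this commit), a sketch of both flavors: a ColumnFamily-level TTL on a hypothetical family, and a per-cell TTL set on a single Put (per-cell TTLs are given in milliseconds):

[source,java]
----
// ColumnFamily TTL: cells older than one day become eligible for removal at compaction time.
HColumnDescriptor cf = new HColumnDescriptor("cf");
cf.setTimeToLive(86400);      // seconds

// Per-cell TTL on a single mutation (HBASE-10560 and later):
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
put.setTTL(60 * 60 * 1000L);  // milliseconds
----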
@@ -494,7 +494,7 @@ There are two notable differences between cell TTL handling and ColumnFamily TTL
 ==  Keeping Deleted Cells
 
 By default, delete markers extend back to the beginning of time.
-Therefore, link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] operations will not see a deleted cell (row or column), even when the Get or Scan operation indicates a time range before the delete marker was placed.
+Therefore, link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] or link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] operations will not see a deleted cell (row or column), even when the Get or Scan operation indicates a time range before the delete marker was placed.
 
 ColumnFamilies can optionally keep deleted cells.
 In this case, deleted cells can still be retrieved, as long as these operations specify a time range that ends before the timestamp of any delete that would affect the cells.
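
For illustration (not part of this commit), a hedged sketch of enabling this behavior on a hypothetical column family:

[source,java]
----
HColumnDescriptor cf = new HColumnDescriptor("cf");
cf.setKeepDeletedCells(KeepDeletedCells.TRUE);  // deleted cells stay readable via time-range queries
----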
@@ -684,7 +684,7 @@ in the table (e.g. make sure values are in the range 1-10). Constraints could
 also be used to enforce referential integrity, but this is strongly discouraged
 as it will dramatically decrease the write throughput of the tables where integrity
 checking is enabled. Extensive documentation on using Constraints can be found at
-link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/constraint/Constraint.html[Constraint]
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/constraint/Constraint.html[Constraint]
 since version 0.94.
 
 [[schema.casestudies]]
@@ -760,7 +760,7 @@ Neither approach is wrong, it just depends on what is most appropriate for the s
 ====
 link:https://issues.apache.org/jira/browse/HBASE-4811[HBASE-4811] implements an API to scan a table or a range within a table in reverse, reducing the need to optimize your schema for forward or reverse scanning.
 This feature is available in HBase 0.98 and later.
-See https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setReversed%28boolean for more information.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setReversed-boolean-[Scan.setReversed()] for more information.
 ====
 
 [[schema.casestudies.log_timeseries.varkeys]]
@@ -789,8 +789,7 @@ The rowkey of LOG_TYPES would be:
 * `[bytes]` variable length bytes for raw hostname or event-type.
 
 A column for this rowkey could be a long with an assigned number, which could be obtained
-by using an
-+++<a href="http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#incrementColumnValue%28byte[],%20byte[],%20byte[],%20long%29">HBase counter</a>+++.
+by using an link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#incrementColumnValue-byte:A-byte:A-byte:A-long-[HBase counter].
 
 So the resulting composite rowkey would be:
 
@@ -806,7 +805,7 @@ In either the Hash or Numeric substitution approach, the raw values for hostname
 This effectively is the OpenTSDB approach.
 What OpenTSDB does is re-write data and pack rows into columns for certain time-periods.
 For a detailed explanation, see: http://opentsdb.net/schema.html, and
-+++<a href="http://www.cloudera.com/content/cloudera/en/resources/library/hbasecon/video-hbasecon-2012-lessons-learned-from-opentsdb.html">Lessons Learned from OpenTSDB</a>+++
+link:https://www.slideshare.net/cloudera/4-opentsdb-hbasecon[Lessons Learned from OpenTSDB]
 from HBaseCon2012.
 
 But this is how the general concept works: data is ingested, for example, in this manner...
@@ -1096,7 +1095,7 @@ The tl;dr version is that you should probably go with one row per user+value, an
 
 Your two options mirror a common question people have when designing HBase schemas: should I go "tall" or "wide"? Your first schema is "tall": each row represents one value for one user, and so there are many rows in the table for each user; the row key is user + valueid, and there would be (presumably) a single column qualifier that means "the value". This is great if you want to scan over rows in sorted order by row key (thus my question above, about whether these ids are sorted correctly). You can start a scan at any user+valueid, read the next 30, and be done.
 What you're giving up is the ability to have transactional guarantees around all the rows for one user, but it doesn't sound like you need that.
-Doing it this way is generally recommended (see here http://hbase.apache.org/book.html#schema.smackdown).
+Doing it this way is generally recommended (see here https://hbase.apache.org/book.html#schema.smackdown).
 
 Your second option is "wide": you store a bunch of values in one row, using different qualifiers (where the qualifier is the valueid). The simple way to do that would be to just store ALL values for one user in a single row.
 I'm guessing you jumped to the "paginated" version because you're assuming that storing millions of columns in a single row would be bad for performance, which may or may not be true; as long as you're not trying to do too much in a single request, or do things like scanning over and returning all of the cells in the row, it shouldn't be fundamentally worse.
@@ -1113,7 +1112,7 @@ If you don't have time to build it both ways and compare, my advice would be to
 [[schema.ops]]
 == Operational and Performance Configuration Options
 
-====  Tune HBase Server RPC Handling
+===  Tune HBase Server RPC Handling
 
 * Set `hbase.regionserver.handler.count` (in `hbase-site.xml`) to cores x spindles for concurrency.
 * Optionally, split the call queues into separate read and write queues for differentiated service. The parameter `hbase.ipc.server.callqueue.handler.factor` specifies the number of call queues:
@@ -1129,7 +1128,7 @@ If you don't have time to build it both ways and compare, my advice would be to
 - `< 0.5` for more short-read
 - `> 0.5` for more long-read
 
-====  Disable Nagle for RPC
+===  Disable Nagle for RPC
 
 Disable Nagle’s algorithm. Delayed ACKs can add up to ~200ms to RPC round trip time. Set the following parameters:
 
@@ -1140,7 +1139,7 @@ Disable Nagle’s algorithm. Delayed ACKs can add up to ~200ms to RPC round trip
 - `hbase.ipc.client.tcpnodelay = true`
 - `hbase.ipc.server.tcpnodelay = true`
 
-====  Limit Server Failure Impact
+===  Limit Server Failure Impact
 
 Detect regionserver failure as fast as reasonable. Set the following parameters:
 
@@ -1149,7 +1148,7 @@ Detect regionserver failure as fast as reasonable. Set the following parameters:
 - `dfs.namenode.avoid.read.stale.datanode = true`
 - `dfs.namenode.avoid.write.stale.datanode = true`
 
-====  Optimize on the Server Side for Low Latency
+===  Optimize on the Server Side for Low Latency
 
 * Skip the network for local blocks. In `hbase-site.xml`, set the following parameters:
 - `dfs.client.read.shortcircuit = true`
@@ -1187,7 +1186,7 @@ Detect regionserver failure as fast as reasonable. Set the following parameters:
 
 ==  Special Cases
 
-====  For applications where failing quickly is better than waiting
+===  For applications where failing quickly is better than waiting
 
 *  In `hbase-site.xml` on the client side, set the following parameters:
 - Set `hbase.client.pause = 1000`
@@ -1196,7 +1195,7 @@ Detect regionserver failure as fast as reasonable. Set the following parameters:
 - Set the RecoverableZookeeper retry count: `zookeeper.recovery.retry = 1` (no retry)
 * In `hbase-site.xml` on the server side, set the Zookeeper session timeout for detecting server failures: `zookeeper.session.timeout` <= 30 seconds (20-30 is good).
 
-====  For applications that can tolerate slightly out of date information
+===  For applications that can tolerate slightly out of date information
 
 **HBase timeline consistency (HBASE-10070)**
 With read replicas enabled, read-only copies of regions (replicas) are distributed over the cluster. One RegionServer services the default or primary replica, which is the only replica that can service writes. Other RegionServers serve the secondary replicas, follow the primary RegionServer, and only see committed updates. The secondary replicas are read-only, but can serve reads immediately while the primary is failing over, cutting read availability blips from seconds to milliseconds. Phoenix supports timeline consistency as of 4.4.0.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/security.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index ccb5adb..cca9364 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -354,7 +354,7 @@ grant 'rest_server', 'RWCA'
 
 For more information about ACLs, please see the <<hbase.accesscontrol.configuration>> section
 
-HBase REST gateway supports link:http://hadoop.apache.org/docs/stable/hadoop-auth/index.html[SPNEGO HTTP authentication] for client access to the gateway.
+HBase REST gateway supports link:https://hadoop.apache.org/docs/stable/hadoop-auth/index.html[SPNEGO HTTP authentication] for client access to the gateway.
 To enable REST gateway Kerberos authentication for client access, add the following to the `hbase-site.xml` file for every REST gateway.
 
 [source,xml]
@@ -390,7 +390,7 @@ Substitute the keytab for HTTP for _$KEYTAB_.
 
 HBase REST gateway supports different 'hbase.rest.authentication.type': simple, kerberos.
 You can also implement a custom authentication by implementing Hadoop AuthenticationHandler, then specify the full class name as 'hbase.rest.authentication.type' value.
-For more information, refer to link:http://hadoop.apache.org/docs/stable/hadoop-auth/index.html[SPNEGO HTTP authentication].
+For more information, refer to link:https://hadoop.apache.org/docs/stable/hadoop-auth/index.html[SPNEGO HTTP authentication].
 
 [[security.rest.gateway]]
 === REST Gateway Impersonation Configuration
@@ -989,7 +989,7 @@ hbase> help "scan"
 ----
 
 +
-This example grants read access to the 'testuser' user and read/write access to the 'developers' group, on cells in the 'pii' column which match the filter.
+To enable cell ACLs, the `hfile.format.version` option in _hbase-site.xml_ should be greater than or equal to 3, and the `hbase.security.access.early_out` option should be set to `false`. This example grants read access to the 'testuser' user and read/write access to the 'developers' group, on cells in the 'pii' column which match the filter.
 +
 ----
 hbase> grant 'user', \
@@ -1390,11 +1390,11 @@ When you issue a Scan or Get, HBase uses your default set of authorizations to
 filter out cells that you do not have access to. A superuser can set the default
 set of authorizations for a given user by using the `set_auths` HBase Shell command
 or the
-link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityClient.html#setAuths(org.apache.hadoop.hbase.client.Connection,%20java.lang.String\[\],%20java.lang.String)[VisibilityClient.setAuths()] method.
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityClient.html#setAuths-org.apache.hadoop.hbase.client.Connection-java.lang.String:A-java.lang.String-[VisibilityClient.setAuths()] method.
 
 You can specify a different authorization during the Scan or Get, by passing the
 AUTHORIZATIONS option in HBase Shell, or the
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setAuthorizations%28org.apache.hadoop.hbase.security.visibility.Authorizations%29[setAuthorizations()]
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setAuthorizations-org.apache.hadoop.hbase.security.visibility.Authorizations-[Scan.setAuthorizations()]
 method if you use the API. This authorization will be combined with your default
 set as an additional filter. It will further filter your results, rather than
 giving you additional authorization.
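
For illustration (not part of this commit), a hedged sketch of passing explicit authorizations on a Scan via the method linked above; the labels are hypothetical and `table` is assumed to be an open `Table`:

[source,java]
----
Scan scan = new Scan();
scan.setAuthorizations(new Authorizations("secret", "topsecret"));
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    // cells are filtered by the supplied labels in addition to the caller's default set
  }
}
----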
@@ -1644,7 +1644,7 @@ Rotate the Master Key::
 
 Bulk loading in secure mode is a bit more involved than normal setup, since the client has to transfer the ownership of the files generated from the MapReduce job to HBase.
 Secure bulk loading is implemented by a coprocessor, named
-link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html[SecureBulkLoadEndpoint],
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html[SecureBulkLoadEndpoint],
 which uses a staging directory configured by the configuration property `hbase.bulkload.staging.dir`, which defaults to
 _/tmp/hbase-staging/_.
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/spark.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/spark.adoc b/src/main/asciidoc/_chapters/spark.adoc
index 774d137..416457b 100644
--- a/src/main/asciidoc/_chapters/spark.adoc
+++ b/src/main/asciidoc/_chapters/spark.adoc
@@ -27,7 +27,7 @@
 :icons: font
 :experimental:
 
-link:http://spark.apache.org/[Apache Spark] is a software framework that is used
+link:https://spark.apache.org/[Apache Spark] is a software framework that is used
 to process data in memory in a distributed manner, and is replacing MapReduce in
 many use cases.
 
@@ -151,7 +151,7 @@ access to HBase
 For examples of all these functionalities, see the HBase-Spark Module.
 
 == Spark Streaming
-http://spark.apache.org/streaming/[Spark Streaming] is a micro batching stream
+https://spark.apache.org/streaming/[Spark Streaming] is a micro batching stream
 processing framework built on top of Spark. HBase and Spark Streaming make great
 companions in that HBase can help serve the following benefits alongside Spark
 Streaming.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/sql.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/sql.adoc b/src/main/asciidoc/_chapters/sql.adoc
index b1ad063..f1c445d 100644
--- a/src/main/asciidoc/_chapters/sql.adoc
+++ b/src/main/asciidoc/_chapters/sql.adoc
@@ -33,10 +33,10 @@ The following projects offer some support for SQL over HBase.
 [[phoenix]]
 === Apache Phoenix
 
-link:http://phoenix.apache.org[Apache Phoenix]
+link:https://phoenix.apache.org[Apache Phoenix]
 
 === Trafodion
 
-link:http://trafodion.incubator.apache.org/[Trafodion: Transactional SQL-on-HBase]
+link:https://trafodion.incubator.apache.org/[Trafodion: Transactional SQL-on-HBase]
 
 :numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/thrift_filter_language.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/thrift_filter_language.adoc b/src/main/asciidoc/_chapters/thrift_filter_language.adoc
index da36cea..1c1279d 100644
--- a/src/main/asciidoc/_chapters/thrift_filter_language.adoc
+++ b/src/main/asciidoc/_chapters/thrift_filter_language.adoc
@@ -28,7 +28,7 @@
 :experimental:
 
 
-Apache link:http://thrift.apache.org/[Thrift] is a cross-platform, cross-language development framework.
+Apache link:https://thrift.apache.org/[Thrift] is a cross-platform, cross-language development framework.
 HBase includes a Thrift API and filter language.
 The Thrift API relies on client and server processes.
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/tracing.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/tracing.adoc b/src/main/asciidoc/_chapters/tracing.adoc
index 0cddd8a..8bd1962 100644
--- a/src/main/asciidoc/_chapters/tracing.adoc
+++ b/src/main/asciidoc/_chapters/tracing.adoc
@@ -30,7 +30,7 @@
 :icons: font
 :experimental:
 
-link:https://issues.apache.org/jira/browse/HBASE-6449[HBASE-6449] added support for tracing requests through HBase, using the open source tracing library, link:http://htrace.incubator.apache.org/[HTrace].
+link:https://issues.apache.org/jira/browse/HBASE-6449[HBASE-6449] added support for tracing requests through HBase, using the open source tracing library, link:https://htrace.incubator.apache.org/[HTrace].
 Setting up tracing is quite simple, however it currently requires some very minor changes to your client code (it would not be very difficult to remove this requirement).
 
 [[tracing.spanreceivers]]
@@ -57,7 +57,7 @@ The `LocalFileSpanReceiver` looks in _hbase-site.xml_      for a `hbase.local-fi
 
 <property>
   <name>hbase.trace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.LocalFileSpanReceiver</value>
+  <value>org.apache.htrace.core.LocalFileSpanReceiver</value>
 </property>
 <property>
   <name>hbase.htrace.local-file-span-receiver.path</name>
@@ -67,7 +67,7 @@ The `LocalFileSpanReceiver` looks in _hbase-site.xml_      for a `hbase.local-fi
 
 HTrace also provides `ZipkinSpanReceiver` which converts spans to link:http://github.com/twitter/zipkin[Zipkin] span format and send them to Zipkin server. In order to use this span receiver, you need to install the jar of htrace-zipkin to your HBase's classpath on all of the nodes in your cluster.
 
-_htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.apache.htrace%22%20AND%20a%3A%22htrace-zipkin%22[Maven central repository]. You could get the latest version from there or just build it locally (see the link:http://htrace.incubator.apache.org/[HTrace] homepage for information on how to do this) and then copy it out to all nodes.
+_htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.apache.htrace%22%20AND%20a%3A%22htrace-zipkin%22[Maven central repository]. You could get the latest version from there or just build it locally (see the link:https://htrace.incubator.apache.org/[HTrace] homepage for information on how to do this) and then copy it out to all nodes.
 
 `ZipkinSpanReceiver` looks for properties called `hbase.htrace.zipkin.collector-hostname` and `hbase.htrace.zipkin.collector-port` in _hbase-site.xml_ with values describing the Zipkin collector server to which span information is sent.
 
@@ -76,7 +76,7 @@ _htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7
 
 <property>
   <name>hbase.trace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.ZipkinSpanReceiver</value>
+  <value>org.apache.htrace.core.ZipkinSpanReceiver</value>
 </property>
 <property>
   <name>hbase.htrace.zipkin.collector-hostname</name>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/troubleshooting.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc b/src/main/asciidoc/_chapters/troubleshooting.adoc
index 1cf93d6..ec0a34d 100644
--- a/src/main/asciidoc/_chapters/troubleshooting.adoc
+++ b/src/main/asciidoc/_chapters/troubleshooting.adoc
@@ -225,7 +225,7 @@ Search here first when you have an issue as its more than likely someone has alr
 [[trouble.resources.lists]]
 === Mailing Lists
 
-Ask a question on the link:http://hbase.apache.org/mail-lists.html[Apache HBase mailing lists].
+Ask a question on the link:https://hbase.apache.org/mail-lists.html[Apache HBase mailing lists].
 The 'dev' mailing list is aimed at the community of developers actually building Apache HBase and for features currently under development, and 'user' is generally used for questions on released versions of Apache HBase.
 Before going to the mailing list, make sure your question has not already been answered by searching the mailing list archives first.
 Use <<trouble.resources.searchhadoop>>.
@@ -596,7 +596,7 @@ See also Jesse Andersen's link:http://blog.cloudera.com/blog/2014/04/how-to-use-
 In some situations clients that fetch data from a RegionServer get a LeaseException instead of the usual <<trouble.client.scantimeout>>.
 Usually the source of the exception is `org.apache.hadoop.hbase.regionserver.Leases.removeLease(Leases.java:230)` (line number may vary). It tends to happen in the context of a slow/freezing `RegionServer#next` call.
 It can be prevented by having `hbase.rpc.timeout` > `hbase.regionserver.lease.period`.
-Harsh J investigated the issue as part of the mailing list thread link:http://mail-archives.apache.org/mod_mbox/hbase-user/201209.mbox/%3CCAOcnVr3R-LqtKhFsk8Bhrm-YW2i9O6J6Fhjz2h7q6_sxvwd2yw%40mail.gmail.com%3E[HBase, mail # user - Lease does not exist exceptions]
+Harsh J investigated the issue as part of the mailing list thread link:https://mail-archives.apache.org/mod_mbox/hbase-user/201209.mbox/%3CCAOcnVr3R-LqtKhFsk8Bhrm-YW2i9O6J6Fhjz2h7q6_sxvwd2yw%40mail.gmail.com%3E[HBase, mail # user - Lease does not exist exceptions]
 
 [[trouble.client.scarylogs]]
 === Shell or client application throws lots of scary exceptions during normal operation
@@ -706,7 +706,10 @@ Because of a change in the format in which MIT Kerberos writes its credentials c
 If you have this problematic combination of components in your environment, to work around this problem, first log in with `kinit` and then immediately refresh the credential cache with `kinit -R`.
 The refresh will rewrite the credential cache without the problematic formatting.
 
-Finally, depending on your Kerberos configuration, you may need to install the link:http://docs.oracle.com/javase/1.4.2/docs/guide/security/jce/JCERefGuide.html[Java Cryptography Extension], or JCE.
+Prior to JDK 1.4, the JCE was an unbundled product, and as such, the JCA and JCE were regularly referred to as separate, distinct components.
+Because the JCE is now bundled in the JDK, the distinction is less apparent. Since the JCE uses the same architecture as the JCA, the JCE should more properly be thought of as a part of the JCA.
+
+If you are running JDK 1.5 or an earlier version, you may still need to install the link:https://docs.oracle.com/javase/1.5.0/docs/guide/security/jce/JCERefGuide.html[Java Cryptography Extension], or JCE, separately.
 Insure the JCE jars are on the classpath on both server and client systems.
 
 You may also need to download the link:http://www.oracle.com/technetwork/java/javase/downloads/jce-6-download-429243.html[unlimited strength JCE policy files].
@@ -758,7 +761,7 @@ For example (substitute VERSION with your HBase version):
 HADOOP_CLASSPATH=`hbase classpath` hadoop jar $HBASE_HOME/hbase-server-VERSION.jar rowcounter usertable
 ----
 
-See http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/package-summary.html#classpathfor more information on HBase MapReduce jobs and classpaths.
+See <<hbase.mapreduce.classpath,HBase, MapReduce, and the CLASSPATH>> for more information on HBase MapReduce jobs and classpaths.
 
 [[trouble.hbasezerocopybytestring]]
 === Launching a job, you get java.lang.IllegalAccessError: com/google/protobuf/HBaseZeroCopyByteString or class com.google.protobuf.ZeroCopyLiteralByteString cannot access its superclass com.google.protobuf.LiteralByteString
@@ -799,7 +802,7 @@ hadoop fs -du /hbase/myTable
 ----
 ...returns a list of the regions under the HBase table 'myTable' and their disk utilization.
 
-For more information on HDFS shell commands, see the link:http://hadoop.apache.org/common/docs/current/file_system_shell.html[HDFS FileSystem Shell documentation].
+For more information on HDFS shell commands, see the link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/FileSystemShell.html[HDFS FileSystem Shell documentation].
 
 [[trouble.namenode.hbase.objects]]
 === Browsing HDFS for HBase Objects
@@ -830,7 +833,7 @@ The HDFS directory structure of HBase WAL is..
             /<WAL>         (WAL files for the RegionServer)
 ----
 
-See the link:http://hadoop.apache.org/common/docs/current/hdfs_user_guide.html[HDFS User Guide] for other non-shell diagnostic utilities like `fsck`.
+See the link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html[HDFS User Guide] for other non-shell diagnostic utilities like `fsck`.
 
 [[trouble.namenode.0size.hlogs]]
 ==== Zero size WALs with data in them
@@ -1171,7 +1174,7 @@ If you have a DNS server, you can set `hbase.zookeeper.dns.interface` and `hbase
 
 ZooKeeper is the cluster's "canary in the mineshaft". It'll be the first to notice issues if any so making sure its happy is the short-cut to a humming cluster.
 
-See the link:http://wiki.apache.org/hadoop/ZooKeeper/Troubleshooting[ZooKeeper Operating Environment Troubleshooting] page.
+See the link:https://wiki.apache.org/hadoop/ZooKeeper/Troubleshooting[ZooKeeper Operating Environment Troubleshooting] page.
 It has suggestions and tools for checking disk and networking performance; i.e.
 the operating environment your ZooKeeper and HBase are running in.
 
@@ -1310,7 +1313,7 @@ These changes were backported to HBase 0.98.x and apply to all newer versions.
 == HBase and HDFS
 
 General configuration guidance for Apache HDFS is out of the scope of this guide.
-Refer to the documentation available at http://hadoop.apache.org/ for extensive information about configuring HDFS.
+Refer to the documentation available at https://hadoop.apache.org/ for extensive information about configuring HDFS.
 This section deals with HDFS in terms of HBase.
 
 In most cases, HBase stores its data in Apache HDFS.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/unit_testing.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/unit_testing.adoc b/src/main/asciidoc/_chapters/unit_testing.adoc
index 6131d5a..e503f81 100644
--- a/src/main/asciidoc/_chapters/unit_testing.adoc
+++ b/src/main/asciidoc/_chapters/unit_testing.adoc
@@ -33,7 +33,7 @@ For information on unit tests for HBase itself, see <<hbase.tests,hbase.tests>>.
 
 == JUnit
 
-HBase uses link:http://junit.org[JUnit] 4 for unit tests
+HBase uses link:http://junit.org[JUnit] for unit tests.
 
 This example will add unit tests to the following example class:
 
@@ -117,8 +117,8 @@ First, add a dependency for Mockito to your Maven POM file.
 
 <dependency>
     <groupId>org.mockito</groupId>
-    <artifactId>mockito-all</artifactId>
-    <version>1.9.5</version>
+    <artifactId>mockito-core</artifactId>
+    <version>2.1.0</version>
     <scope>test</scope>
 </dependency>
 ----
@@ -171,7 +171,7 @@ Similarly, you can now expand into other operations such as Get, Scan, or Delete
 
 == MRUnit
 
-link:http://mrunit.apache.org/[Apache MRUnit] is a library that allows you to unit-test MapReduce jobs.
+link:https://mrunit.apache.org/[Apache MRUnit] is a library that allows you to unit-test MapReduce jobs.
 You can use it to test HBase jobs in the same way as other MapReduce jobs.
 
 Given a MapReduce job that writes to an HBase table called `MyTest`, which has one column family called `CF`, the reducer of such a job could look like the following:

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/upgrading.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/upgrading.adoc b/src/main/asciidoc/_chapters/upgrading.adoc
index d07766b..fd8a86a 100644
--- a/src/main/asciidoc/_chapters/upgrading.adoc
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -67,7 +67,7 @@ In addition to the usual API versioning considerations HBase has other compatibi
 
 .File format compatibility
 * Support file formats backward and forward compatible
-* Example: File, ZK encoding, directory layout is upgraded automatically as part of an HBase upgrade. User can rollback to the older version and everything will continue to work.
+* Example: File, ZK encoding, directory layout is upgraded automatically as part of an HBase upgrade. User can downgrade to the older version and everything will continue to work.
 
 .Client API compatibility
 * Allow changing or removing existing client APIs.
@@ -75,7 +75,7 @@ In addition to the usual API versioning considerations HBase has other compatibi
 * APIs available in a patch version will be available in all later patch versions. However, new APIs may be added which will not be available in earlier patch versions.
 * New APIs introduced in a patch version will only be added in a source compatible way footnote:[See 'Source Compatibility' https://blogs.oracle.com/darcy/entry/kinds_of_compatibility]: i.e. code that implements public APIs will continue to compile.
 ** Example: A user using a newly deprecated API does not need to modify application code with HBase API calls until the next major version.
-* 
+*
 
 .Client Binary compatibility
 * Client code written to APIs available in a given patch release can run unchanged (no recompilation needed) against the new jars of later patch versions.
@@ -111,7 +111,7 @@ for warning about incompatible changes). All effort will be made to provide a de
 | | Major | Minor | Patch
 |Client-Server wire Compatibility|  N |Y |Y
 |Server-Server Compatibility |N |Y |Y
-|File Format Compatibility | N footnote:[comp_matrix_offline_upgrade_note,Running an offline upgrade tool without rollback might be needed. We will typically only support migrating data from major version X to major version X+1.] | Y |Y
+|File Format Compatibility | N footnote:[comp_matrix_offline_upgrade_note,Running an offline upgrade tool without downgrade might be needed. We will typically only support migrating data from major version X to major version X+1.] | Y |Y
 |Client API Compatibility  | N | Y |Y
 |Client Binary Compatibility | N | N |Y
 4+|Server-Side Limited API Compatibility
@@ -125,10 +125,23 @@ for warning about incompatible changes). All effort will be made to provide a de
 [[hbase.client.api.surface]]
 ==== HBase API Surface
 
-HBase has a lot of API points, but for the compatibility matrix above, we differentiate between Client API, Limited Private API, and Private API. HBase uses a version of link:https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Compatibility.html[Hadoop's Interface classification]. HBase's Interface classification classes can be found link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/classification/package-summary.html[here].
+HBase has a lot of API points, but for the compatibility matrix above, we differentiate between Client API, Limited Private API, and Private API. HBase uses link:https://yetus.apache.org/documentation/0.5.0/interface-classification/[Apache Yetus Audience Annotations] to guide downstream expectations for stability.
 
-* InterfaceAudience: captures the intended audience, possible values are Public (for end users and external projects), LimitedPrivate (for other Projects, Coprocessors or other plugin points), and Private (for internal use). Notice that, you may find that the classes which are declared as IA.Private are used as parameter or return value for the interfaces which are declared as IA.LimitedPrivate. This is possible. You should treat the IA.Private object as a monolithic object, which means you can use it as a parameter to call other methods, or return it, but you should never try to access its methods or fields.
-* InterfaceStability: describes what types of interface changes are permitted. Possible values are Stable, Evolving, Unstable, and Deprecated. Notice that this annotation is only valid for classes which are marked as IA.LimitedPrivate. The stability of IA.Public classes is only related to the upgrade type(major, minor or patch). And for IA.Private classes, there is no guarantee on the stability between releases. Refer to the Compatibility Matrix above for more details.
+* InterfaceAudience (link:https://yetus.apache.org/documentation/0.5.0/audience-annotations-apidocs/org/apache/yetus/audience/InterfaceAudience.html[javadocs]): captures the intended audience, possible values include:
+  - Public: safe for end users and external projects
+  - LimitedPrivate: used for internals we expect to be pluggable, such as coprocessors
+  - Private: strictly for use within HBase itself
+Classes which are defined as `IA.Private` may be used as parameters or return values for interfaces which are declared `IA.LimitedPrivate`. Treat the `IA.Private` object as opaque; do not try to access its methods or fields directly.
+* InterfaceStability (link:https://yetus.apache.org/documentation/0.5.0/audience-annotations-apidocs/org/apache/yetus/audience/InterfaceStability.html[javadocs]): describes what types of interface changes are permitted. Possible values include:
+  - Stable: the interface is fixed and is not expected to change
+  - Evolving: the interface may change in future minor versions
+  - Unstable: the interface may change at any time
+
+Please keep in mind the following interactions between the `InterfaceAudience` and `InterfaceStability` annotations within the HBase project:
+
+* `IA.Public` classes are inherently stable and adhere to our stability guarantees relating to the type of upgrade (major, minor, or patch).
+* `IA.LimitedPrivate` classes should always be annotated with one of the given `InterfaceStability` values. If they are not, you should presume they are `IS.Unstable`.
+* `IA.Private` classes should be considered implicitly unstable, with no guarantee of stability between releases.
 
 [[hbase.client.api]]
 HBase Client API::
@@ -146,9 +159,9 @@ HBase Private API::
 === Pre 1.0 versions
 
 .HBase Pre-1.0 versions are all EOM
-NOTE: For new installations, do not deploy 0.94.y, 0.96.y, or 0.98.y.  Deploy our stable version. See link:https://issues.apache.org/jira/browse/HBASE-11642[EOL 0.96], link:https://issues.apache.org/jira/browse/HBASE-16215[clean up of EOM releases], and link:http://www.apache.org/dist/hbase/[the header of our downloads].
+NOTE: For new installations, do not deploy 0.94.y, 0.96.y, or 0.98.y.  Deploy our stable version. See link:https://issues.apache.org/jira/browse/HBASE-11642[EOL 0.96], link:https://issues.apache.org/jira/browse/HBASE-16215[clean up of EOM releases], and link:https://www.apache.org/dist/hbase/[the header of our downloads].
 
-Before the semantic versioning scheme pre-1.0, HBase tracked either Hadoop's versions (0.2x) or 0.9x versions. If you are into the arcane, checkout our old wiki page on link:http://wiki.apache.org/hadoop/Hbase/HBaseVersions[HBase Versioning] which tries to connect the HBase version dots. Below sections cover ONLY the releases before 1.0.
+Before the semantic versioning scheme pre-1.0, HBase tracked either Hadoop's versions (0.2x) or 0.9x versions. If you are into the arcane, checkout our old wiki page on link:https://web.archive.org/web/20150905071342/https://wiki.apache.org/hadoop/Hbase/HBaseVersions[HBase Versioning] which tries to connect the HBase version dots. Below sections cover ONLY the releases before 1.0.
 
 [[hbase.development.series]]
 .Odd/Even Versioning or "Development" Series Releases
@@ -180,6 +193,135 @@ Unless otherwise specified, HBase point versions are binary compatible. You can
 
 In the minor version-particular sections below, we call out where the versions are wire/protocol compatible and in this case, it is also possible to do a <<hbase.rolling.upgrade>>. For example, in <<upgrade1.0.rolling.upgrade>>, we state that it is possible to do a rolling upgrade between hbase-0.98.x and hbase-1.0.0.
 
+== Rollback
+
+Sometimes things don't go as planned when attempting an upgrade. This section explains how to perform a _rollback_ to an earlier HBase release. Note that this should only be needed between Major and some Minor releases. You should always be able to _downgrade_ between HBase Patch releases within the same Minor version. These instructions may require you to take steps before you start the upgrade process, so be sure to read through this section beforehand.
+
+=== Caveats
+
+.Rollback vs Downgrade
+This section describes how to perform a _rollback_ on an upgrade between HBase minor and major versions. In this document, rollback refers to the process of taking an upgraded cluster and restoring it to the old version _while losing all changes that have occurred since upgrade_. By contrast, a cluster _downgrade_ would restore an upgraded cluster to the old version while maintaining any data written since the upgrade. We currently only offer instructions to rollback HBase clusters. Further, rollback only works when these instructions are followed prior to performing the upgrade.
+
+When these instructions talk about rollback vs downgrade of prerequisite cluster services (i.e. HDFS), you should treat leaving the service version the same as a degenerate case of downgrade.
+
+.Replication
+Unless you are doing an all-service rollback, the HBase cluster will lose any configured peers for HBase replication. If your cluster is configured for HBase replication, then prior to following these instructions you should document all replication peers. After performing the rollback you should then add each documented peer back to the cluster. For more information on enabling HBase replication, listing peers, and adding a peer see <<hbase.replication.management>>. Note also that data written to the cluster since the upgrade may or may not have already been replicated to any peers. Determining which, if any, peers have seen replication data as well as rolling back the data in those peers is out of the scope of this guide.
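+
+For example, you might capture and later re-add the peer list from the HBase shell. This is only a sketch: the peer ID and cluster key below are placeholders, and the exact `add_peer` syntax varies by HBase version (check `help 'add_peer'` in your shell).
+
+[source,bash]
+----
+# Record the configured replication peers before starting the upgrade
+echo "list_peers" | hbase shell &> /tmp/replication-peers-before-upgrade.txt
+
+# After the rollback, re-add each documented peer, for example:
+echo "add_peer '1', CLUSTER_KEY => 'zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase'" | hbase shell
+----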
+
+.Data Locality
+Unless you are doing an all-service rollback, going through a rollback procedure will likely destroy all locality for Region Servers. You should expect degraded performance until after the cluster has had time to go through compactions to restore data locality. Optionally, you can force a compaction to speed this process up at the cost of generating cluster load.
+
+.Configurable Locations
+The instructions below assume default locations for the HBase data directory and the HBase znode. Both of these locations are configurable and you should verify the values used in your cluster before proceeding. If your cluster uses a different value, substitute it for the default in the instructions that follow (a quick way to check the effective values is sketched after this list):
+
+* HBase data directory is configured via the key `hbase.rootdir` and has a default value of '/hbase'.
+* HBase znode is configured via the key `zookeeper.znode.parent` and has a default value of '/hbase'.
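+
+A quick way to confirm the values in effect on a node is sketched below; `HBaseConfTool` simply prints the value of a configuration key as seen by the HBase client configuration on that node.
+
+[source,bash]
+----
+# Print the effective HBase configuration values on this node
+hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.rootdir
+hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent
+----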
+
+=== All service rollback
+
+If you will be performing a rollback of both the HDFS and ZooKeeper services, then HBase's data will be rolled back in the process.
+
+.Requirements
+
+* Ability to rollback HDFS and ZooKeeper
+
+.Before upgrade
+No additional steps are needed pre-upgrade. As an extra precautionary measure, you may wish to use distcp to back up the HBase data off of the cluster to be upgraded. To do so, follow the steps in the 'Before upgrade' section of 'Rollback after HDFS downgrade' but copy to another HDFS instance instead of within the same instance.
+
+.Performing a rollback
+
+. Stop HBase
+. Perform a rollback for HDFS and ZooKeeper (HBase should remain stopped)
+. Change the installed version of HBase to the previous version
+. Start HBase
+. Verify HBase contents—use the HBase shell to list tables and scan some known values.
+
+=== Rollback after HDFS rollback and ZooKeeper downgrade
+
+If you will be rolling back HDFS but going through a ZooKeeper downgrade, then HBase will be in an inconsistent state. You must ensure the cluster is not started until you complete this process.
+
+.Requirements
+
+* Ability to rollback HDFS
+* Ability to downgrade ZooKeeper
+
+.Before upgrade
+No additional steps are needed pre-upgrade. As an extra precautionary measure, you may wish to use distcp to back up the HBase data off of the cluster to be upgraded. To do so, follow the steps in the 'Before upgrade' section of 'Rollback after HDFS downgrade' but copy to another HDFS instance instead of within the same instance.
+
+.Performing a rollback
+
+. Stop HBase
+. Perform a rollback for HDFS and a downgrade for ZooKeeper (HBase should remain stopped)
+. Change the installed version of HBase to the previous version
+. Clean out ZooKeeper information related to HBase. WARNING: This step will permanently destroy all replication peers. Please see the section on HBase Replication under Caveats for more information.
++
+.Clean HBase information out of ZooKeeper
+[source,bash]
+----
+[hpnewton@gateway_node.example.com ~]$ zookeeper-client -server zookeeper1.example.com:2181,zookeeper2.example.com:2181,zookeeper3.example.com:2181
+Welcome to ZooKeeper!
+JLine support is disabled
+rmr /hbase
+quit
+Quitting...
+----
+. Start HBase
+. Verify HBase contents—use the HBase shell to list tables and scan some known values.
+
+=== Rollback after HDFS downgrade
+
+If you will be performing an HDFS downgrade, then you'll need to follow these instructions regardless of whether ZooKeeper goes through rollback, downgrade, or reinstallation.
+
+.Requirements
+
+* Ability to downgrade HDFS
+* Pre-upgrade cluster must be able to run MapReduce jobs
+* HDFS super user access
+* Sufficient space in HDFS for at least two copies of the HBase data directory
+
+.Before upgrade
+Before beginning the upgrade process, you must take a complete backup of HBase's backing data. The following instructions cover backing up the data within the current HDFS instance. Alternatively, you can use the distcp command to copy the data to another HDFS cluster.
+
+. Stop the HBase cluster
+. Copy the HBase data directory to a backup location using the https://hadoop.apache.org/docs/current/hadoop-distcp/DistCp.html[distcp command] as the HDFS super user (shown below on a security enabled cluster)
++
+.Using distcp to backup the HBase data directory
+[source,bash]
+----
+[hpnewton@gateway_node.example.com ~]$ kinit -k -t hdfs.keytab hdfs@EXAMPLE.COM
+[hpnewton@gateway_node.example.com ~]$ hadoop distcp /hbase /hbase-pre-upgrade-backup
+----
+. DistCp will launch a MapReduce job to handle copying the files in a distributed fashion. Check the output of the distcp command to ensure this job completed successfully; you can also sanity-check the copy as sketched below.
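+
+As an additional sanity check (a sketch; with HBase stopped, the original and the backup copy should report essentially the same size), you can compare the space used by the two directories:
+
+[source,bash]
+----
+[hpnewton@gateway_node.example.com ~]$ hdfs dfs -du -s -h /hbase /hbase-pre-upgrade-backup
+----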
+
+.Performing a rollback
+
+. Stop HBase
+. Perform a downgrade for HDFS and a downgrade/rollback for ZooKeeper (HBase should remain stopped)
+. Change the installed version of HBase to the previous version
+. Restore the HBase data directory from prior to the upgrade as the HDFS super user (shown below on a security enabled cluster). If you backed up your data on another HDFS cluster instead of locally, you will need to use the distcp command to copy it back to the current HDFS cluster.
++
+.Restore the HBase data directory
+[source,bash]
+----
+[hpnewton@gateway_node.example.com ~]$ kinit -k -t hdfs.keytab hdfs@EXAMPLE.COM
+[hpnewton@gateway_node.example.com ~]$ hdfs dfs -mv /hbase /hbase-upgrade-rollback
+[hpnewton@gateway_node.example.com ~]$ hdfs dfs -mv /hbase-pre-upgrade-backup /hbase
+----
+. Clean out ZooKeeper information related to HBase. WARNING: This step will permanently destroy all replication peers. Please see the section on HBase Replication under Caveats for more information.
++
+.Clean HBase information out of ZooKeeper
+[source,bash]
+----
+[hpnewton@gateway_node.example.com ~]$ zookeeper-client -server zookeeper1.example.com:2181,zookeeper2.example.com:2181,zookeeper3.example.com:2181
+Welcome to ZooKeeper!
+JLine support is disabled
+rmr /hbase
+quit
+Quitting...
+----
+. Start HBase
+. Verify HBase contents—use the HBase shell to list tables and scan some known values.
+
 == Upgrade Paths
 
 [[upgrade1.0]]
@@ -213,10 +355,6 @@ You may have made use of this configuration if you are using BucketCache. If NOT
 .If you have your own custom filters.
 See the release notes on the issue link:https://issues.apache.org/jira/browse/HBASE-12068[HBASE-12068 [Branch-1\] Avoid need to always do KeyValueUtil#ensureKeyValue for Filter transformCell]; be sure to follow the recommendations therein.
 
-[[dlr]]
-.Distributed Log Replay
-<<distributed.log.replay>> is off by default in HBase 1.0.0. Enabling it can make a big difference improving HBase MTTR. Enable this feature if you are doing a clean stop/start when you are upgrading. You cannot rolling upgrade to this feature (caveat if you are running on a version of HBase in excess of HBase 0.98.4 -- see link:https://issues.apache.org/jira/browse/HBASE-12577[HBASE-12577 Disable distributed log replay by default] for more).
-
 .Mismatch Of `hbase.client.scanner.max.result.size` Between Client and Server
 If either the client or server version is lower than 0.98.11/1.0.0 and the server
 has a smaller value for `hbase.client.scanner.max.result.size` than the client, scan
@@ -241,9 +379,9 @@ There are no known issues running a <<hbase.rolling.upgrade,rolling upgrade>> fr
 In hbase-1.x, the default Scan caching 'number of rows' changed.
 Where in 0.98.x, it defaulted to 100, in later HBase versions, the
 default became Integer.MAX_VALUE. Not setting a cache size can make
-for Scans that run for a long time server-side, especially if 
+for Scans that run for a long time server-side, especially if
 they are running with stringent filtering.  See
-link:https://issues.apache.org/jira/browse/HBASE-16973[Revisiting default value for hbase.client.scanner.caching]; 
+link:https://issues.apache.org/jira/browse/HBASE-16973[Revisiting default value for hbase.client.scanner.caching];
 for further discussion.
 
 [[upgrade1.0.from.0.94]]

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/zookeeper.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/zookeeper.adoc b/src/main/asciidoc/_chapters/zookeeper.adoc
index 91577da..33eeadb 100644
--- a/src/main/asciidoc/_chapters/zookeeper.adoc
+++ b/src/main/asciidoc/_chapters/zookeeper.adoc
@@ -106,7 +106,7 @@ The newer version, the better. ZooKeeper 3.4.x is required as of HBase 1.0.0
 .ZooKeeper Maintenance
 [CAUTION]
 ====
-Be sure to set up the data dir cleaner described under link:http://zookeeper.apache.org/doc/r3.1.2/zookeeperAdmin.html#sc_maintenance[ZooKeeper
+Be sure to set up the data dir cleaner described under link:https://zookeeper.apache.org/doc/r3.1.2/zookeeperAdmin.html#sc_maintenance[ZooKeeper
         Maintenance] else you could have 'interesting' problems a couple of months in; i.e.
 zookeeper could start dropping sessions if it has to run through a directory of hundreds of thousands of logs which is wont to do around leader reelection time -- a process rare but run on occasion whether because a machine is dropped or happens to hiccup.
 ====
@@ -135,9 +135,9 @@ ${HBASE_HOME}/bin/hbase-daemons.sh {start,stop} zookeeper
 Note that you can use HBase in this manner to spin up a ZooKeeper cluster, unrelated to HBase.
 Just make sure to set `HBASE_MANAGES_ZK` to `false`      if you want it to stay up across HBase restarts so that when HBase shuts down, it doesn't take ZooKeeper down with it.
 
-For more information about running a distinct ZooKeeper cluster, see the ZooKeeper link:http://hadoop.apache.org/zookeeper/docs/current/zookeeperStarted.html[Getting
+For more information about running a distinct ZooKeeper cluster, see the ZooKeeper link:https://hadoop.apache.org/zookeeper/docs/current/zookeeperStarted.html[Getting
         Started Guide].
-Additionally, see the link:http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A7[ZooKeeper Wiki] or the link:http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_zkMulitServerSetup[ZooKeeper
+Additionally, see the link:https://wiki.apache.org/hadoop/ZooKeeper/FAQ#A7[ZooKeeper Wiki] or the link:https://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_zkMulitServerSetup[ZooKeeper
         documentation] for more information on ZooKeeper sizing.
 
 [[zk.sasl.auth]]
@@ -181,7 +181,7 @@ We'll refer to this JAAS configuration file as _$CLIENT_CONF_        below.
 
 === HBase-managed ZooKeeper Configuration
 
-On each node that will run a zookeeper, a master, or a regionserver, create a link:http://docs.oracle.com/javase/1.4.2/docs/guide/security/jgss/tutorials/LoginConfigFile.html[JAAS]        configuration file in the conf directory of the node's _HBASE_HOME_        directory that looks like the following:
+On each node that will run a zookeeper, a master, or a regionserver, create a link:http://docs.oracle.com/javase/7/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html[JAAS]        configuration file in the conf directory of the node's _HBASE_HOME_        directory that looks like the following:
 
 [source,java]
 ----

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/book.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/book.adoc b/src/main/asciidoc/book.adoc
index e5898d5..1bc9ed7 100644
--- a/src/main/asciidoc/book.adoc
+++ b/src/main/asciidoc/book.adoc
@@ -19,14 +19,14 @@
  */
 ////
 
-= Apache HBase (TM) Reference Guide 
+= Apache HBase (TM) Reference Guide
 :Author: Apache HBase Team
 :Email: <hb...@lists.apache.org>
 :doctype: book
 :Version: {docVersion}
 :revnumber: {docVersion}
 // Logo for PDF -- doesn't render in HTML
-:title-logo: hbase_logo_with_orca.png
+:title-logo-image: image:hbase_logo_with_orca.png[pdfwidth=4.25in,align=center]
 :numbered:
 :toc: left
 :toclevels: 1
@@ -42,7 +42,7 @@
 // Logo for HTML -- doesn't render in PDF
 ++++
 <div>
-  <a href="http://hbase.apache.org"><img src="images/hbase_logo_with_orca.png" alt="Apache HBase Logo" /></a>
+  <a href="https://hbase.apache.org"><img src="images/hbase_logo_with_orca.png" alt="Apache HBase Logo" /></a>
 </div>
 ++++
 
@@ -62,6 +62,7 @@ include::_chapters/mapreduce.adoc[]
 include::_chapters/security.adoc[]
 include::_chapters/architecture.adoc[]
 include::_chapters/hbase_mob.adoc[]
+include::_chapters/backup_restore.adoc[]
 include::_chapters/hbase_apis.adoc[]
 include::_chapters/external_apis.adoc[]
 include::_chapters/thrift_filter_language.adoc[]
@@ -93,5 +94,3 @@ include::_chapters/asf.adoc[]
 include::_chapters/orca.adoc[]
 include::_chapters/tracing.adoc[]
 include::_chapters/rpc.adoc[]
-
-


[4/6] hbase git commit: updating docs from master

Posted by nd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/backup_restore.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/backup_restore.adoc b/src/main/asciidoc/_chapters/backup_restore.adoc
new file mode 100644
index 0000000..a9dbcf5
--- /dev/null
+++ b/src/main/asciidoc/_chapters/backup_restore.adoc
@@ -0,0 +1,912 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[[backuprestore]]
+= Backup and Restore
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+[[br.overview]]
+== Overview
+
+Backup and restore is a standard operation provided by many databases. An effective backup and restore
+strategy helps ensure that users can recover data in case of unexpected failures. The HBase backup and restore
+feature helps ensure that enterprises using HBase as a canonical data repository can recover from catastrophic
+failures. Another important feature is the ability to restore the database to a particular
+point-in-time, commonly referred to as a snapshot.
+
+The HBase backup and restore feature provides the ability to create full backups and incremental backups on
+tables in an HBase cluster. The full backup is the foundation on which incremental backups are applied
+to build iterative snapshots. Incremental backups can be run on a schedule to capture changes over time,
+for example by using a Cron task. Incremental backups are more cost-effective than full backups because they only capture
+the changes since the last backup and they also enable administrators to restore the database to any prior incremental backup. Furthermore, the
+utilities also enable table-level data backup-and-recovery if you do not want to restore the entire dataset
+of the backup.
+
+The backup and restore feature supplements the HBase Replication feature. While HBase replication is ideal for
+creating "hot" copies of the data (where the replicated data is immediately available for query), the backup and
+restore feature is ideal for creating "cold" copies of data (where a manual step must be taken to restore the system).
+Previously, users only had the ability to create full backups via the ExportSnapshot functionality. The incremental
+backup implementation is the novel improvement over the previous "art" provided by ExportSnapshot.
+
+[[br.terminology]]
+== Terminology
+
+The backup and restore feature introduces new terminology which can be used to understand how control flows through the
+system.
+
+* _A backup_: A logical unit of data and metadata which can restore a table to its state at a specific point in time.
+* _Full backup_: a type of backup which wholly encapsulates the contents of the table at a point in time.
+* _Incremental backup_: a type of backup which contains the changes in a table since a full backup.
+* _Backup set_: A user-defined name which references one or more tables over which a backup can be executed.
+* _Backup ID_: A unique name which identifies one backup from the rest, e.g. `backupId_1467823988425`
+
+[[br.planning]]
+== Planning
+
+There are some common strategies which can be used to implement backup and restore in your environment. The following section
+shows how these strategies are implemented and identifies potential tradeoffs with each.
+
+WARNING: The backup and restore tools have not been tested on Transparent Data Encryption (TDE) enabled HDFS clusters.
+This is related to the open issue link:https://issues.apache.org/jira/browse/HBASE-16178[HBASE-16178].
+
+[[br.intracluster.backup]]
+=== Backup within a cluster
+
+This strategy stores the backups on the same cluster as where the backup was taken. This approach is only appropriate for testing
+as it does not provide any additional safety on top of what the software itself already provides.
+
+.Intra-Cluster Backup
+image::backup-intra-cluster.png[]
+
+[[br.dedicated.cluster.backup]]
+=== Backup using a dedicated cluster
+
+This strategy provides greater fault tolerance and provides a path towards disaster recovery. In this setting, you will
+store the backup on a separate HDFS cluster by supplying the backup destination cluster’s HDFS URL to the backup utility.
+You should consider backing up to a different physical location, such as a different data center.
+
+Typically, a backup-dedicated HDFS cluster uses a more economical hardware profile to save money.
+
+.Dedicated HDFS Cluster Backup
+image::backup-dedicated-cluster.png[]
+
+[[br.cloud.or.vendor.backup]]
+=== Backup to the Cloud or a storage vendor appliance
+
+Another approach to safeguarding HBase incremental backups is to store the data on provisioned, secure servers that belong
+to third-party vendors and that are located off-site. The vendor can be a public cloud provider or a storage vendor who uses
+a Hadoop-compatible file system, such as S3 and other HDFS-compatible destinations.
+
+.Backup to Cloud or Vendor Storage Solutions
+image::backup-cloud-appliance.png[]
+
+NOTE: The HBase backup utility does not support backup to multiple destinations. A workaround is to manually create copies
+of the backup files from HDFS or S3.
+
+[[br.initial.setup]]
+== First-time configuration steps
+
+This section contains the necessary configuration changes that must be made in order to use the backup and restore feature.
+As this feature makes significant use of YARN's MapReduce framework to parallelize these I/O heavy operations, configuration
+changes extend outside of just `hbase-site.xml`.
+
+=== Allow the "hbase" system user in YARN
+
+The YARN *container-executor.cfg* configuration file must have the following property setting: _allowed.system.users=hbase_. No spaces
+are allowed in entries of this configuration file.
+
+WARNING: Skipping this step will result in runtime errors when executing the first backup tasks.
+
+*Example of a valid container-executor.cfg file for backup and restore:*
+
+[source]
+----
+yarn.nodemanager.log-dirs=/var/log/hadoop/mapred
+yarn.nodemanager.linux-container-executor.group=yarn
+banned.users=hdfs,yarn,mapred,bin
+allowed.system.users=hbase
+min.user.id=500
+----
+
+=== HBase specific changes
+
+Add the following properties to hbase-site.xml and restart HBase if it is already running.
+
+NOTE: The ",..." is an ellipsis meant to imply that this is a comma-separated list of values, not literal text which should be added to hbase-site.xml.
+
+[source]
+----
+<property>
+  <name>hbase.backup.enable</name>
+  <value>true</value>
+</property>
+<property>
+  <name>hbase.master.logcleaner.plugins</name>
+  <value>org.apache.hadoop.hbase.backup.master.BackupLogCleaner,...</value>
+</property>
+<property>
+  <name>hbase.procedure.master.classes</name>
+  <value>org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager,...</value>
+</property>
+<property>
+  <name>hbase.procedure.regionserver.classes</name>
+  <value>org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager,...</value>
+</property>
+<property>
+  <name>hbase.coprocessor.region.classes</name>
+  <value>org.apache.hadoop.hbase.backup.BackupObserver,...</value>
+</property>
+<property>
+  <name>hbase.master.hfilecleaner.plugins</name>
+  <value>org.apache.hadoop.hbase.backup.BackupHFileCleaner,...</value>
+</property>
+----
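+
+After restarting HBase, a quick sanity check is to confirm that the backup commands are available; this is only a sketch, and the exact help output varies by version.
+
+[source]
+----
+$ hbase backup help create
+----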
+
+== Backup and Restore commands
+
+This covers the command-line utilities that administrators would run to create, restore, and merge backups. Tools to
+inspect details on specific backup sessions are covered in the next section, <<br.administration,Administration of Backup Images>>.
+
+Run the command `hbase backup help <command>` to access the online help that provides basic information about a command
+and its options. The below information is captured in this help message for each command.
+
+// hbase backup create
+
+[[br.creating.complete.backup]]
+### Creating a Backup Image
+
+[NOTE]
+====
+For HBase clusters also using Apache Phoenix: include the SQL system catalog tables in the backup. In the event that you
+need to restore the HBase backup, access to the system catalog tables enables you to resume Phoenix interoperability with the
+restored data.
+====
+
+The first step in running the backup and restore utilities is to perform a full backup and to store the data in a separate image
+from the source. At a minimum, you must do this to get a baseline before you can rely on incremental backups.
+
+Run the following command as HBase superuser:
+
+[source]
+----
+hbase backup create <type> <backup_path>
+----
+
+After the command finishes running, the console prints a SUCCESS or FAILURE status message. The SUCCESS message includes a _backup_ ID.
+The backup ID is the Unix time (also known as Epoch time) that the HBase master received the backup request from the client.
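+
+Because the ID embeds that timestamp, you can recover the wall-clock time of a backup directly from its ID. The sketch below assumes GNU `date` and that the trailing three digits of the ID are milliseconds, as in `backupId_1467823988425`.
+
+[source]
+----
+# 1467823988425 is epoch milliseconds; drop the last three digits for seconds
+$ date -u -d @1467823988
+Wed Jul  6 16:53:08 UTC 2016
+----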
+
+[TIP]
+====
+Record the backup ID that appears at the end of a successful backup. In case the source cluster fails and you need to recover the
+dataset with a restore operation, having the backup ID readily available can save time.
+====
+
+[[br.create.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+_type_::
+  The type of backup to execute: _full_ or _incremental_. As a reminder, an _incremental_ backup requires a _full_ backup to
+  already exist.
+
+_backup_path_::
+  The _backup_path_ argument specifies the full filesystem URI of where to store the backup image. Valid prefixes
+  are _hdfs:_, _webhdfs:_, _gpfs:_, and _s3fs:_.
+
+[[br.create.named.cli.arguments]]
+#### Named Command-Line Arguments
+
+_-t <table_name[,table_name]>_::
+  A comma-separated list of tables to back up. If no tables are specified, all tables are backed up. No regular-expression or
+  wildcard support is present; all table names must be explicitly listed. See <<br.using.backup.sets,Backup Sets>> for more
+  information about performing operations on collections of tables. Mutually exclusive with the _-s_ option; one of these
+  named options is required.
+
+_-s <backup_set_name>_::
+  Identifies tables to back up based on a backup set. See <<br.using.backup.sets,Using Backup Sets>> for the purpose and usage
+  of backup sets. Mutually exclusive with the _-t_ option.
+
+_-w <number_workers>_::
+  (Optional) Specifies the number of parallel workers to copy data to the backup destination. Backups are currently executed by MapReduce jobs
+  so this value corresponds to the number of Mappers that will be spawned by the job.
+
+_-b <bandwidth_per_worker>_::
+  (Optional) Specifies the bandwidth of each worker in MB per second.
+
+_-d_::
+  (Optional) Enables "DEBUG" mode which prints additional logging about the backup creation.
+
+_-q <name>_::
+  (Optional) Specifies the name of the YARN queue in which the MapReduce job that creates the backup should be executed. This option
+  is useful to prevent backup tasks from stealing resources away from other MapReduce jobs of high importance.
+
+[[br.usage.examples]]
+#### Example usage
+
+[source]
+----
+$ hbase backup create full hdfs://host5:8020/data/backup -t SALES2,SALES3 -w 3
+----
+
+This command creates a full backup image of two tables, SALES2 and SALES3, in the HDFS instance whose NameNode is host5:8020,
+in the path _/data/backup_. The _-w_ option specifies that no more than three parallel workers complete the operation.
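+
+Once a full backup exists, an incremental backup against the same destination could look like the following; this is a sketch that reuses the same hypothetical NameNode and tables.
+
+[source]
+----
+$ hbase backup create incremental hdfs://host5:8020/data/backup -t SALES2,SALES3
+----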
+
+// hbase backup restore
+
+[[br.restoring.backup]]
+### Restoring a Backup Image
+
+Run the following command as an HBase superuser. You can only restore a backup on a running HBase cluster because the data must be
+redistributed to the RegionServers for the operation to complete successfully.
+
+[source]
+----
+hbase restore <backup_path> <backup_id>
+----
+
+[[br.restore.positional.args]]
+#### Positional Command-Line Arguments
+
+_backup_path_::
+  The _backup_path_ argument specifies the full filesystem URI of where the backup image is stored. Valid prefixes
+  are _hdfs:_, _webhdfs:_, _gpfs:_, and _s3fs:_.
+
+_backup_id_::
+  The backup ID that uniquely identifies the backup image to be restored.
+
+
+[[br.restore.named.args]]
+#### Named Command-Line Arguments
+
+_-t <table_name[,table_name]>_::
+  A comma-separated list of tables to restore. See <<br.using.backup.sets,Backup Sets>> for more
+  information about performing operations on collections of tables. Mutually exclusive with the _-s_ option; one of these
+  named options is required.
+
+_-s <backup_set_name>_::
+  Identifies tables to restore based on a backup set. See <<br.using.backup.sets,Using Backup Sets>> for the purpose and usage
+  of backup sets. Mutually exclusive with the _-t_ option.
+
+_-q <name>_::
+  (Optional) Specifies the name of the YARN queue in which the MapReduce job that performs the restore should be executed. This option
+  is useful to prevent restore tasks from stealing resources away from other MapReduce jobs of high importance.
+
+_-c_::
+  (Optional) Perform a dry-run of the restore. The actions are checked, but not executed.
+
+_-m <target_tables>_::
+  (Optional) A comma-separated list of tables to restore into. If this option is not provided, the original table name is used. When
+  this option is provided, there must be an equal number of entries provided in the `-t` option.
+
+_-o_::
+  (Optional) Overwrites the target table for the restore if the table already exists.
+
+
+[[br.restore.usage]]
+#### Example of Usage
+
+[source]
+----
+hbase restore /tmp/backup_incremental backupId_1467823988425 -t mytable1,mytable2
+----
+
+This command restores two tables of an incremental backup image. In this example:
+
+* `/tmp/backup_incremental` is the path to the directory containing the backup image.
+* `backupId_1467823988425` is the backup ID.
+* `mytable1` and `mytable2` are the names of tables in the backup image to be restored.
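+
+A sketch of a restore that first performs a dry run with `-c` and then maps the tables onto new names with `-m` is shown below; `mytable1_restored` and `mytable2_restored` are hypothetical target names.
+
+[source]
+----
+$ hbase restore /tmp/backup_incremental backupId_1467823988425 -t mytable1,mytable2 -c
+$ hbase restore /tmp/backup_incremental backupId_1467823988425 \
+    -t mytable1,mytable2 -m mytable1_restored,mytable2_restored
+----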
+
+// hbase backup merge
+
+[[br.merge.backup]]
+### Merging Incremental Backup Images
+
+This command can be used to merge two or more incremental backup images into a single incremental
+backup image, consolidating multiple small incremental backup images into a larger one. For example, it can merge hourly incremental
+backups into a daily incremental backup image, or daily incremental backups into a weekly incremental backup.
+
+[source]
+----
+$ hbase backup merge <backup_ids>
+----
+
+[[br.merge.backup.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+_backup_ids_::
+  A comma-separated list of incremental backup image IDs that are to be combined into a single image.
+
+[[br.merge.backup.named.cli.arguments]]
+#### Named Command-Line Arguments
+
+None.
+
+[[br.merge.backup.example]]
+#### Example usage
+
+[source]
+----
+$ hbase backup merge backupId_1467823988425,backupId_1467827588425
+----
+
+// hbase backup set
+
+[[br.using.backup.sets]]
+### Using Backup Sets
+
+Backup sets can ease the administration of HBase data backups and restores by reducing the amount of repetitive input
+of table names. You can group tables into a named backup set with the `hbase backup set add` command. You can then use
+the `-s` option to invoke the name of a backup set in the `hbase backup create` or `hbase backup restore` commands rather than listing
+every table in the group individually. You can have multiple backup sets.
+
+NOTE: Note the difference between the `hbase backup set add` command and the `-s` option. The `hbase backup set add`
+command must be run before the `-s` option can be used in another command, because backup sets must be named and defined
+before they can be used as a shortcut.
+
+If you run the `hbase backup set add` command and specify a backup set name that does not yet exist on your system, a new set
+is created. If you run the command with the name of an existing backup set name, then the tables that you specify are added
+to the set.
+
+In this command, the backup set name is case-sensitive.
+
+NOTE: The metadata of backup sets is stored within HBase. If you do not have access to the original HBase cluster with the
+backup set metadata, then you must specify individual table names to restore the data.
+
+To create a backup set, run the following command as the HBase superuser:
+
+[source]
+----
+$ hbase backup set <subcommand> <backup_set_name> <tables>
+----
+
+[[br.set.subcommands]]
+#### Backup Set Subcommands
+
+The following list details the subcommands of the `hbase backup set` command.
+
+NOTE: You must enter one (and no more than one) of the following subcommands after `hbase backup set` to complete an operation.
+Also, the backup set name is case-sensitive in the command-line utility.
+
+_add_::
+  Adds table[s] to a backup set. Specify a _backup_set_name_ value after this argument to create a backup set.
+
+_remove_::
+  Removes tables from the set. Specify the tables to remove in the tables argument.
+
+_list_::
+  Lists all backup sets.
+
+_describe_::
+  Displays a description of a backup set. The information includes whether the set has full
+  or incremental backups, start and end times of the backups, and a list of the tables in the set. This subcommand must precede
+  a valid _backup_set_name_ value.
+
+_delete_::
+  Deletes a backup set. Enter the value for the _backup_set_name_ option directly after the `hbase backup set delete` command.
+
+[[br.set.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+_backup_set_name_::
+  Use to assign or invoke a backup set name. The backup set name must contain only printable characters and cannot have any spaces.
+
+_tables_::
+  List of tables (or a single table) to include in the backup set. Enter the table names as a comma-separated list. If no tables
+  are specified, all tables are included in the set.
+
+TIP: Maintain a log or other record of the case-sensitive backup set names and the corresponding tables in each set on a separate
+or remote cluster as part of your backup strategy. This information can help you in case of failure on the primary cluster.
+
+[[br.set.usage]]
+#### Example of Usage
+
+[source]
+----
+$ hbase backup set add Q1Data TEAM_3,TEAM_4
+----
+
+Depending on the environment, this command results in _one_ of the following actions:
+
+* If the `Q1Data` backup set does not exist, a backup set containing tables `TEAM_3` and `TEAM_4` is created.
+* If the `Q1Data` backup set exists already, the tables `TEAM_3` and `TEAM_4` are added to the `Q1Data` backup set.
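+
+The remaining subcommands follow the same pattern; a sketch using the hypothetical `Q1Data` set:
+
+[source]
+----
+$ hbase backup set list
+$ hbase backup set describe Q1Data
+$ hbase backup set remove Q1Data TEAM_4
+$ hbase backup set delete Q1Data
+----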
+
+[[br.administration]]
+## Administration of Backup Images
+
+The `hbase backup` command has several subcommands that help with administering backup images as they accumulate. Most production
+environments require recurring backups, so it is necessary to have utilities to help manage the data of the backup repository.
+Some subcommands enable you to find information that can help identify backups that are relevant in a search for particular data.
+You can also delete backup images.
+
+The following list details each `hbase backup` subcommand that can help administer backups. Run the full command-subcommand line as
+the HBase superuser.
+
+// hbase backup progress
+
+[[br.managing.backup.progress]]
+### Managing Backup Progress
+
+You can monitor a running backup in another terminal session by running the _hbase backup progress_ command and specifying the backup ID as an argument.
+
+For example, run the following command as the HBase superuser to view the progress of a backup:
+
+[source]
+----
+$ hbase backup progress <backup_id>
+----
+
+[[br.progress.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+_backup_id_::
+  Specifies the backup whose progress you want to monitor. The backupId is case-sensitive.
+
+[[br.progress.named.cli.arguments]]
+#### Named Command-Line Arguments
+
+None.
+
+[[br.progress.example]]
+#### Example usage
+
+[source]
+----
+hbase backup progress backupId_1467823988425
+----
+
+// hbase backup history
+
+[[br.managing.backup.history]]
+### Managing Backup History
+
+This command displays a log of backup sessions. The information for each session includes backup ID, type (full or incremental), the tables
+in the backup, status, and start and end time. Specify the number of backup sessions to display with the optional -n argument.
+
+[source]
+----
+$ hbase backup history <backup_id>
+----
+
+[[br.history.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+_backup_id_::
+  (Optional) Specifies the backup whose history record you want to display. The backupId is case-sensitive.
+
+[[br.history.named.cli.arguments]]
+#### Named Command-Line Arguments
+
+_-n <num_records>_::
+  (Optional) The maximum number of backup records (Default: 10).
+
+_-p <backup_root_path>_::
+  The full filesystem URI of where backup images are stored.
+
+_-s <backup_set_name>_::
+  The name of the backup set to obtain history for. Mutually exclusive with the _-t_ option.
+
+_-t <table_name>_::
+  The name of a table to obtain history for. Mutually exclusive with the _-s_ option.
+
+[[br.history.backup.example]]
+#### Example usage
+
+[source]
+----
+$ hbase backup history
+$ hbase backup history -n 20
+$ hbase backup history -t WebIndexRecords
+----
+
+// hbase backup describe
+
+[[br.describe.backup]]
+### Describing a Backup Image
+
+This command can be used to obtain information about a specific backup image.
+
+[source]
+----
+$ hbase backup describe <backup_id>
+----
+
+[[br.describe.backup.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+_backup_id_::
+  The ID of the backup image to describe.
+
+[[br.describe.backup.named.cli.arguments]]
+#### Named Command-Line Arguments
+
+None.
+
+[[br.describe.backup.example]]
+#### Example usage
+
+[source]
+----
+$ hbase backup describe backupId_1467823988425
+----
+
+// hbase backup delete
+
+[[br.delete.backup]]
+### Deleting a Backup Image
+
+This command can be used to delete a backup image which is no longer needed.
+
+[source]
+----
+$ hbase backup delete <backup_id>
+----
+
+[[br.delete.backup.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+_backup_id_::
+  The ID of the backup image which should be deleted.
+
+[[br.delete.backup.named.cli.arguments]]
+#### Named Command-Line Arguments
+
+None.
+
+[[br.delete.backup.example]]
+#### Example usage
+
+[source]
+----
+$ hbase backup delete backupId_1467823988425
+----
+
+// hbase backup repair
+
+[[br.repair.backup]]
+### Backup Repair Command
+
+This command attempts to correct any inconsistencies in persisted backup metadata which exists as
+the result of software errors or unhandled failure scenarios. While the backup implementation tries
+to correct all errors on its own, this tool may be necessary in the cases where the system cannot
+automatically recover.
+
+[source]
+----
+$ hbase backup repair
+----
+
+[[br.repair.backup.positional.cli.arguments]]
+#### Positional Command-Line Arguments
+
+None.
+
+[[br.repair.backup.named.cli.arguments]]
+#### Named Command-Line Arguments
+
+None.
+
+[[br.repair.backup.example]]
+#### Example usage
+
+[source]
+----
+$ hbase backup repair
+----
+
+[[br.backup.configuration]]
+## Configuration keys
+
+The backup and restore feature includes both required and optional configuration keys.
+
+### Required properties
+
+_hbase.backup.enable_: Controls whether or not the feature is enabled (Default: `false`). Set this value to `true`.
+
+_hbase.master.logcleaner.plugins_: A comma-separated list of classes invoked when cleaning logs in the HBase Master. Set
+this value to `org.apache.hadoop.hbase.backup.master.BackupLogCleaner` or append it to the current value.
+
+_hbase.procedure.master.classes_: A comma-separated list of classes invoked with the Procedure framework in the Master. Set
+this value to `org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager` or append it to the current value.
+
+_hbase.procedure.regionserver.classes_: A comma-separated list of classes invoked with the Procedure framework in the RegionServer.
+Set this value to `org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager` or append it to the current value.
+
+_hbase.coprocessor.region.classes_: A comma-separated list of RegionObservers deployed on tables. Set this value to
+`org.apache.hadoop.hbase.backup.BackupObserver` or append it to the current value.
+
+_hbase.master.hfilecleaner.plugins_: A comma-separated list of HFileCleaners deployed on the Master. Set this value
+to `org.apache.hadoop.hbase.backup.BackupHFileCleaner` or append it to the current value.
+
+### Optional properties
+
+_hbase.backup.system.ttl_: The time-to-live in seconds of data in the `hbase:backup` table (default: forever). This property
+is only relevant prior to the creation of the `hbase:backup` table. Use the `alter` command in the HBase shell to modify the TTL
+when this table already exists. See the <<br.filesystem.growth.warning,below section>> for more details on the impact of this
+configuration property.
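+
+A sketch of adjusting the TTL once the table exists is shown below. The column family name is an assumption for illustration only; inspect the table with `describe` first and use the family names it reports.
+
+[source]
+----
+# Confirm the actual column family names of the backup system table first
+$ echo "describe 'hbase:backup'" | hbase shell
+# Then set a 30-day TTL (2592000 seconds); 'meta' is a placeholder family name
+$ echo "alter 'hbase:backup', {NAME => 'meta', TTL => 2592000}" | hbase shell
+----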
+
+_hbase.backup.attempts.max_: The number of attempts to perform when taking hbase table snapshots (default: 10).
+
+_hbase.backup.attempts.pause.ms_: The amount of time to wait between failed snapshot attempts in milliseconds (default: 10000).
+
+_hbase.backup.logroll.timeout.millis_: The amount of time (in milliseconds) to wait for RegionServers to execute a WAL rolling
+in the Master's procedure framework (default: 30000).
+
+[[br.best.practices]]
+## Best Practices
+
+### Formulate a restore strategy and test it.
+
+Before you rely on a backup and restore strategy for your production environment, identify how backups must be performed,
+and more importantly, how restores must be performed. Test the plan to ensure that it is workable.
+At a minimum, store backup data from a production cluster on a different cluster or server. To further safeguard the data,
+use a backup location that is at a different physical location.
+
+If you have an unrecoverable loss of data on your primary production cluster as a result of computer system issues, you may
+be able to restore the data from a different cluster or server at the same site. However, a disaster that destroys the whole
+site renders locally stored backups useless. Consider storing the backup data and necessary resources (both computing capacity
+and operator expertise) to restore the data at a site sufficiently remote from the production site. In the case of a catastrophe
+at the whole primary site (fire, earthquake, etc.), the remote backup site can be very valuable.
+
+### Secure a full backup image first.
+
+As a baseline, you must complete a full backup of HBase data at least once before you can rely on incremental backups. The full
+backup should be stored outside of the source cluster. To ensure complete dataset recovery, you must run the restore utility
+with the option to restore the baseline full backup. The full backup is the foundation of your dataset. Incremental backup data
+is applied on top of the full backup during the restore operation to return you to the point in time when backup was last taken.
+
+### Define and use backup sets for groups of tables that are logical subsets of the entire dataset.
+
+You can group tables into an object called a backup set. A backup set can save time when you have a particular group of tables
+that you expect to repeatedly back up or restore.
+
+When you create a backup set, you type table names to include in the group. The backup set includes not only groups of related
+tables, but also retains the HBase backup metadata. Afterwards, you can invoke the backup set name to indicate what tables apply
+to the command execution instead of entering all the table names individually.
+
+### Document the backup and restore strategy, and ideally log information about each backup.
+
+Document the whole process so that the knowledge base can transfer to new administrators after employee turnover. As an extra
+safety precaution, also log the calendar date, time, and other relevant details about the data of each backup. This metadata
+can potentially help locate a particular dataset in case of source cluster failure or primary site disaster. Maintain duplicate
+copies of all documentation: one copy at the production cluster site and another at the backup location or wherever it can be
+accessed by an administrator remotely from the production cluster.
+
+[[br.s3.backup.scenario]]
+## Scenario: Safeguarding Application Datasets on Amazon S3
+
+This scenario describes how a hypothetical retail business uses backups to safeguard application data and then restore the dataset
+after failure.
+
+The HBase administration team uses backup sets to store data from a group of tables that have interrelated information for an
+application called green. In this example, one table contains transaction records and the other contains customer details. The
+two tables need to be backed up and be recoverable as a group.
+
+The admin team also wants to ensure daily backups occur automatically.
+
+.Tables Composing The Backup Set
+image::backup-app-components.png[]
+
+The following is an outline of the steps and examples of commands that are used to back up the data for the _green_ application and
+to recover the data later. All commands are run when logged in as HBase superuser.
+
+1. A backup set called _green_set_ is created as an alias for both the transactions table and the customer table. The backup set can
+be used for all operations to avoid typing each table name. The backup set name is case-sensitive and should be formed with only
+printable characters and without spaces.
+
+[source]
+----
+$ hbase backup set add green_set transactions
+$ hbase backup set add green_set customer
+----
+
+2. The first backup of green_set data must be a full backup. The following command example shows how credentials are passed to Amazon
+S3 and specifies the file system with the s3a: prefix.
+
+[source]
+----
+$ ACCESS_KEY=ABCDEFGHIJKLMNOPQRST
+$ SECRET_KEY=123456789abcdefghijklmnopqrstuvwxyzABCD
+$ sudo -u hbase hbase backup create full \
+  s3a://$ACCESS_KEY:$SECRET_KEY@prodhbasebackups/backups -s green_set
+----
+
+3. Incremental backups should be run according to a schedule that ensures essential data recovery in the event of a catastrophe. At
+this retail company, the HBase admin team decides that automated daily backups secure the data sufficiently. The team decides that
+they can implement this by modifying an existing Cron job that is defined in `/etc/crontab`. Consequently, IT modifies the Cron job
+by adding the following line:
+
+[source]
+----
+@daily hbase hbase backup create incremental s3a://$ACCESS_KEY:$SECRET_KEY@prodhbasebackups/backups -s green_set
+----
+
+4. A catastrophic IT incident disables the production cluster that the green application uses. An HBase system administrator of the
+backup cluster must restore the _green_set_ dataset to the point in time closest to the recovery objective.
+
+NOTE: If the administrator of the backup HBase cluster has the backup ID with relevant details in accessible records, the following
+search with the `hdfs dfs -ls` command and manually scanning the backup ID list can be bypassed. Consider continuously maintaining
+and protecting a detailed log of backup IDs outside the production cluster in your environment.
+
+The HBase administrator runs the following command on the directory where backups are stored to print the list of successful backup
+IDs on the console:
+
+`hdfs dfs -ls -t /prodhbasebackups/backups`
+
+5. The admin scans the list to see which backup was created at a date and time closest to the recovery objective. To do this, the
+admin converts the calendar timestamp of the recovery point in time to Unix time because backup IDs are uniquely identified with
+Unix time. The backup IDs are listed in reverse chronological order, meaning the most recent successful backup appears first.
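+
+A sketch of that conversion with GNU `date` (times in UTC; the timestamp below is chosen so that it corresponds to the backup ID used throughout this example):
+
+[source]
+----
+$ date -u -d '2016-07-06 16:53:08' +%s
+1467823988
+----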
+
+The admin notices that the following line in the command output corresponds with the _green_set_ backup that needs to be restored:
+
+`/prodhbasebackups/backups/backup_1467823988425`
+
+6. The admin restores _green_set_ by invoking the backup ID and the -overwrite option. The -overwrite option truncates all existing data
+in the destination and populates the tables with data from the backup dataset. Without this flag, the backup data is appended to the
+existing data in the destination. In this case, the admin decides to overwrite the data because it is corrupted.
+
+[source]
+----
+$ sudo -u hbase hbase restore -s green_set \
+  s3a://$ACCESS_KEY:$SECRET_KEY@prodhbasebackups/backups backup_1467823988425 \
+  -overwrite
+----
+
+[[br.data.security]]
+## Security of Backup Data
+
+Because this feature copies data to remote locations, it is important to take a moment to clearly state the procedural
+concerns that exist around data security. Like the HBase replication feature, backup and restore provides the constructs to automatically
+copy data from within a corporate boundary to some system outside of that boundary. When storing sensitive data, it is imperative that
+the locations to which data is sent by backup and restore, or by any feature which extracts data from HBase, have undergone a security
+audit to ensure that only authenticated users are allowed to access that data.
+
+For example, with the above example of backing up data to S3, it is of the utmost importance that the proper permissions are assigned
+to the S3 bucket to ensure that only a minimum set of authorized users are allowed to access this data. Because the data is no longer
+being accessed via HBase, and its authentication and authorization controls, we must ensure that the filesystem storing that data is
+providing a comparable level of security. This is a manual step which users *must* implement on their own.
+
+[[br.technical.details]]
+## Technical Details of Incremental Backup and Restore
+
+HBase incremental backups enable more efficient capture of HBase table images than previous attempts at serial backup and restore
+solutions, such as those that only used HBase Export and Import APIs. Incremental backups use Write Ahead Logs (WALs) to capture
+the data changes since the previous backup was created. A WAL roll (create new WALs) is executed across all RegionServers to track
+the WALs that need to be in the backup.
+
+After the incremental backup image is created, the source backup files usually are on the same node as the data source. A process similar
+to the DistCp (distributed copy) tool is used to move the source backup files to the target file systems. When a table restore operation
+starts, a two-step process is initiated. First, the full backup is restored from the full backup image. Second, all WAL files from
+incremental backups between the last full backup and the incremental backup being restored are converted to HFiles, which the HBase
+Bulk Load utility automatically imports as restored data in the table.
+
+You can only restore on a live HBase cluster because the data must be redistributed to complete the restore operation successfully.
+
+[[br.filesystem.growth.warning]]
+## A Warning on File System Growth
+
+As a reminder, incremental backups are implemented via retaining the write-ahead logs which HBase primarily uses for data durability.
+Thus, to ensure that all data needing to be included in a backup is still available in the system, the HBase backup and restore feature
+retains all write-ahead logs since the last backup until the next incremental backup is executed.
+
+Like HBase Snapshots, this can have an unexpectedly large impact on the HDFS usage of HBase for high volume tables. Take care in enabling
+and using the backup and restore feature, specifically with a mind to removing backup sessions when they are not actively being used.
+
+The only automated upper bound on retained write-ahead logs for backup and restore is based on the TTL of the `hbase:backup` system table which,
+as of this writing, is infinite (backup table entries are never automatically deleted). This requires that administrators
+perform backups on a schedule whose frequency is relative to the amount of available space on HDFS (e.g. less available HDFS space requires
+more aggressive backup merges and deletions). As a reminder, the TTL can be altered on the `hbase:backup` table using the `alter` command
+in the HBase shell. Modifying the configuration property `hbase.backup.system.ttl` in hbase-site.xml after the system table exists has no effect.
+
+[[br.backup.capacity.planning]]
+## Capacity Planning
+
+When designing a distributed system deployment, it is critical that some basic mathematical rigor is executed to ensure sufficient computational
+capacity is available given the data and software requirements of the system. For this feature, the availability of network capacity is the largest
+bottleneck when estimating the performance of some implementation of backup and restore. The second most costly function is the speed at which
+data can be read/written.
+
+### Full Backups
+
+To estimate the duration of a full backup, we have to understand the general actions which are invoked:
+
+* Write-ahead log roll on each RegionServer: ones to tens of seconds per RegionServer in parallel. Relative to the load on each RegionServer.
+* Take an HBase snapshot of the table(s): tens of seconds. Relative to the number of regions and files that comprise the table.
+* Export the snapshot to the destination: see below. Relative to the size of the data and the network bandwidth to the destination.
+
+[[br.export.snapshot.cost]]
+To approximate how long the final step will take, we have to make some assumptions on hardware. Be aware that these will *not* be accurate for your
+system -- these are numbers that you or your administrator know for your system. Let's say the speed of reading data from HDFS on a single node is
+capped at 80MB/s (across all Mappers that run on that host), a modern network interface controller (NIC) supports 10Gb/s, the top-of-rack switch can
+handle 40Gb/s, and the WAN between your clusters is 10Gb/s. This means that you can only ship data to your remote at a speed of 1.25GB/s -- meaning
+that 16 nodes (`1.25 * 1024 / 80 = 16`) participating in the ExportSnapshot should be able to fully saturate the link between clusters. With more
+nodes in the cluster, we can still saturate the network but at a lesser impact on any one node which helps ensure local SLAs are met. If the size
+of the snapshot is 10TB, this full backup would take in the ballpark of 2.5 hours (`10 * 1024 / 1.25 / (60 * 60) = 2.28hrs`).
+
+As a general statement, it is very likely that the WAN bandwidth between your local cluster and the remote storage is the largest
+bottleneck to the speed of a full backup.
+
+When the concern is restricting the computational impact of backups to a "production system", the above formulas can be reused with the optional
+command-line arguments to `hbase backup create`: `-b`, `-w`, `-q`. The `-b` option defines the bandwidth at which each worker (Mapper) would
+write data. The `-w` argument limits the number of workers that would be spawned in the DistCp job. The `-q` allows the user to specify a YARN
+queue which can limit the specific nodes where the workers will be spawned -- this can quarantine the backup workers performing the copy to
+a set of non-critical nodes. Relating the `-b` and `-w` options to our earlier equations: `-b` would be used to restrict each node from reading
+data at the full 80MB/s and `-w` is used to limit the job from spawning 16 worker tasks.
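+
+Continuing the hypothetical 16-node example, a backup throttled to roughly half of the estimated capacity might look like the sketch below; the destination URI, backup set name, and queue name are all assumptions.
+
+[source]
+----
+$ hbase backup create full hdfs://backup-nn.example.com:8020/data/backup -s prod_tables -w 8 -b 40 -q backup_queue
+----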
+
+### Incremental Backup
+
+Like we did for full backups, we have to understand the incremental backup process to approximate its runtime and cost.
+
+* Identify new write-ahead logs since last full or incremental backup: negligible. A priori knowledge from the backup system table(s).
+* Read, filter, and write "minimized" HFiles equivalent to the WALs: dominated by the speed of writing data. Relative to write speed of HDFS.
+* DistCp the HFiles to the destination: <<br.export.snapshot.cost,see above>>.
+
+For the second step, the dominating cost of this operation would be the re-writing of the data (under the assumption that a majority of the
+data in the WAL is preserved). In this case, we can assume an aggregate write speed of 30MB/s per node. Continuing our 16-node cluster example,
+this would require approximately 15 minutes to perform this step for 50GB of data (50 * 1024 / 60 / 60 = 14.2). The amount of time to start the
+DistCp MapReduce job would likely dominate the actual time taken to copy the data (50 / 1.25 = 40 seconds) and can be ignored.
+
+[[br.limitations]]
+## Limitations of the Backup and Restore Utility
+
+*Serial backup operations*
+
+Backup operations cannot be run concurrently. An operation includes actions like create, delete, restore, and merge. Only one active backup session is supported. link:https://issues.apache.org/jira/browse/HBASE-16391[HBASE-16391]
+will introduce support for multiple backup sessions.
+
+*No means to cancel backups*
+
+Neither backup nor restore operations can be canceled (link:https://issues.apache.org/jira/browse/HBASE-15997[HBASE-15997], link:https://issues.apache.org/jira/browse/HBASE-15998[HBASE-15998]).
+The workaround to cancel a backup would be to kill the client-side backup command (`control-C`), ensure all relevant MapReduce jobs have exited, and then
+run the `hbase backup repair` command to ensure the system backup metadata is consistent.
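+
+A sketch of that workaround is shown below; how you identify the backup's MapReduce jobs (here, simply listing running YARN applications) will depend on your environment.
+
+[source]
+----
+# After interrupting the client-side backup command with Control-C,
+# confirm that no backup-related MapReduce jobs are still running:
+$ yarn application -list -appStates RUNNING
+# then repair the backup system metadata:
+$ hbase backup repair
+----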
+
+*Backups can only be saved to a single location*
+
+Copying backup information to multiple locations is an exercise left to the user. link:https://issues.apache.org/jira/browse/HBASE-15476[HBASE-15476] will
+introduce the ability to specify multiple-backup destinations intrinsically.
+
+*HBase superuser access is required*
+
+Only an HBase superuser (e.g. hbase) is allowed to perform backup/restore, which can pose a problem for shared HBase installations. Current mitigations would require
+coordination with system administrators to build and deploy a backup and restore strategy (link:https://issues.apache.org/jira/browse/HBASE-14138[HBASE-14138]).
+
+*Backup restoration is an online operation*
+
+As a caveat of the current implementation, the HBase cluster must be online in order to perform a restore from a backup (link:https://issues.apache.org/jira/browse/HBASE-16573[HBASE-16573]).
+
+*Some operations may fail and require re-run*
+
+The HBase backup feature is primarily client driven. While there is the standard HBase retry logic built into the HBase Connection, persistent errors in executing operations
+may propagate back to the client (e.g. snapshot failure due to region splits). The backup implementation should be moved from the client side into the ProcedureV2 framework
+in the future, which would provide additional robustness around transient/retryable failures. The `hbase backup repair` command is meant to correct states which the system
+cannot automatically detect and recover from.
+
+*Avoidance of declaration of public API*
+
+While the Java API to interact with this feature exists and its implementation is separated from an interface, insufficient rigor has been applied to determine if
+it is exactly what we intend to ship to users. As such, it is marked for a `Private` audience with the expectation that, as users begin to try the feature, there
+will be modifications that would necessitate breaking compatibility (link:https://issues.apache.org/jira/browse/HBASE-17517[HBASE-17517]).
+
+*Lack of global metrics for backup and restore*
+
+Individual backup and restore operations contain metrics about the amount of work the operation included, but there is no centralized location (e.g. the Master UI)
+which presents this information for consumption (link:https://issues.apache.org/jira/browse/HBASE-16565[HBASE-16565]).

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/community.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/community.adoc b/src/main/asciidoc/_chapters/community.adoc
index f63d597..d141dbf 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -47,9 +47,9 @@ The below policy is something we put in place 09/2012.
 It is a suggested policy rather than a hard requirement.
 We want to try it first to see if it works before we cast it in stone.
 
-Apache HBase is made of link:https://issues.apache.org/jira/browse/HBASE#selectedTab=com.atlassian.jira.plugin.system.project%3Acomponents-panel[components].
+Apache HBase is made of link:https://issues.apache.org/jira/projects/HBASE?selectedItem=com.atlassian.jira.jira-projects-plugin:components-page[components].
 Components have one or more <<owner,OWNER>>s.
-See the 'Description' field on the link:https://issues.apache.org/jira/browse/HBASE#selectedTab=com.atlassian.jira.plugin.system.project%3Acomponents-panel[components]        JIRA page for who the current owners are by component.
+See the 'Description' field on the link:https://issues.apache.org/jira/projects/HBASE?selectedItem=com.atlassian.jira.jira-projects-plugin:components-page[components] JIRA page for who the current owners are by component.
 
 Patches that fit within the scope of a single Apache HBase component require, at least, a +1 by one of the component's owners before commit.
 If owners are absent -- busy or otherwise -- two +1s by non-owners will suffice.
@@ -88,7 +88,7 @@ We also are currently in violation of this basic tenet -- replication at least k
 [[owner]]
 .Component Owner/Lieutenant
 
-Component owners are listed in the description field on this Apache HBase JIRA link:https://issues.apache.org/jira/browse/HBASE#selectedTab=com.atlassian.jira.plugin.system.project%3Acomponents-panel[components]        page.
+Component owners are listed in the description field on this Apache HBase JIRA link:https://issues.apache.org/jira/projects/HBASE?selectedItem=com.atlassian.jira.jira-projects-plugin:components-page[components] page.
 The owners are listed in the 'Description' field rather than in the 'Component Lead' field because the latter only allows us list one individual whereas it is encouraged that components have multiple owners.
 
 Owners or component lieutenants are volunteers who are (usually, but not necessarily) expert in their component domain and may have an agenda on how they think their Apache HBase component should evolve.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/compression.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/compression.adoc b/src/main/asciidoc/_chapters/compression.adoc
index e5b9b8f..23ceeaf 100644
--- a/src/main/asciidoc/_chapters/compression.adoc
+++ b/src/main/asciidoc/_chapters/compression.adoc
@@ -115,12 +115,7 @@ The data format is nearly identical to Diff encoding, so there is not an image t
 Prefix Tree::
   Prefix tree encoding was introduced as an experimental feature in HBase 0.96.
   It provides similar memory savings to the Prefix, Diff, and Fast Diff encoder, but provides faster random access at a cost of slower encoding speed.
-+
-Prefix Tree may be appropriate for applications that have high block cache hit ratios. It introduces new 'tree' fields for the row and column.
-The row tree field contains a list of offsets/references corresponding to the cells in that row. This allows for a good deal of compression.
-For more details about Prefix Tree encoding, see link:https://issues.apache.org/jira/browse/HBASE-4676[HBASE-4676].
-+
-It is difficult to graphically illustrate a prefix tree, so no image is included. See the Wikipedia article for link:http://en.wikipedia.org/wiki/Trie[Trie] for more general information about this data structure.
+  It was removed in hbase-2.0.0. It was a good idea but saw little uptake. If you are interested in reviving this effort, write to the hbase dev list.
 
 [[data.block.encoding.types]]
 === Which Compressor or Data Block Encoder To Use
@@ -267,8 +262,7 @@ See <<brand.new.compressor,brand.new.compressor>>).
 .Install LZO Support
 
 HBase cannot ship with LZO because of incompatibility between HBase, which uses an Apache Software License (ASL) and LZO, which uses a GPL license.
-See the link:http://wiki.apache.org/hadoop/UsingLzoCompression[Using LZO
-              Compression] wiki page for information on configuring LZO support for HBase.
+See the link:https://github.com/twitter/hadoop-lzo/blob/master/README.md[Hadoop-LZO at Twitter] for information on configuring LZO support for HBase.
 
 If you depend upon LZO compression, consider configuring your RegionServers to fail to start if LZO is not available.
 See <<hbase.regionserver.codecs,hbase.regionserver.codecs>>.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/configuration.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index bf14d11..7218a42 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -43,7 +43,7 @@ _backup-masters_::
 
 _hadoop-metrics2-hbase.properties_::
   Used to connect HBase Hadoop's Metrics2 framework.
-  See the link:http://wiki.apache.org/hadoop/HADOOP-6728-MetricsV2[Hadoop Wiki entry] for more information on Metrics2.
+  See the link:https://wiki.apache.org/hadoop/HADOOP-6728-MetricsV2[Hadoop Wiki entry] for more information on Metrics2.
   Contains only commented-out examples by default.
 
 _hbase-env.cmd_ and _hbase-env.sh_::
@@ -124,7 +124,7 @@ NOTE: You must set `JAVA_HOME` on each node of your cluster. _hbase-env.sh_ prov
 [[os]]
 .Operating System Utilities
 ssh::
-  HBase uses the Secure Shell (ssh) command and utilities extensively to communicate between cluster nodes. Each server in the cluster must be running `ssh` so that the Hadoop and HBase daemons can be managed. You must be able to connect to all nodes via SSH, including the local node, from the Master as well as any backup Master, using a shared key rather than a password. You can see the basic methodology for such a set-up in Linux or Unix systems at "<<passwordless.ssh.quickstart>>". If your cluster nodes use OS X, see the section, link:http://wiki.apache.org/hadoop/Running_Hadoop_On_OS_X_10.5_64-bit_%28Single-Node_Cluster%29[SSH: Setting up Remote Desktop and Enabling Self-Login] on the Hadoop wiki.
+  HBase uses the Secure Shell (ssh) command and utilities extensively to communicate between cluster nodes. Each server in the cluster must be running `ssh` so that the Hadoop and HBase daemons can be managed. You must be able to connect to all nodes via SSH, including the local node, from the Master as well as any backup Master, using a shared key rather than a password. You can see the basic methodology for such a set-up in Linux or Unix systems at "<<passwordless.ssh.quickstart>>". If your cluster nodes use OS X, see the section, link:https://wiki.apache.org/hadoop/Running_Hadoop_On_OS_X_10.5_64-bit_%28Single-Node_Cluster%29[SSH: Setting up Remote Desktop and Enabling Self-Login] on the Hadoop wiki.
 
 DNS::
   HBase uses the local hostname to self-report its IP address. Both forward and reverse DNS resolving must work in versions of HBase previous to 0.92.0. The link:https://github.com/sujee/hadoop-dns-checker[hadoop-dns-checker] tool can be used to verify DNS is working correctly on the cluster. The project `README` file provides detailed instructions on usage.
@@ -180,13 +180,13 @@ Windows::
 
 
 [[hadoop]]
-=== link:http://hadoop.apache.org[Hadoop](((Hadoop)))
+=== link:https://hadoop.apache.org[Hadoop](((Hadoop)))
 
 The following table summarizes the versions of Hadoop supported with each version of HBase.
 Based on the version of HBase, you should select the most appropriate version of Hadoop.
 You can use Apache Hadoop, or a vendor's distribution of Hadoop.
 No distinction is made here.
-See link:http://wiki.apache.org/hadoop/Distributions%20and%20Commercial%20Support[the Hadoop wiki] for information about vendors of Hadoop.
+See link:https://wiki.apache.org/hadoop/Distributions%20and%20Commercial%20Support[the Hadoop wiki] for information about vendors of Hadoop.
 
 .Hadoop 2.x is recommended.
 [TIP]
@@ -220,6 +220,7 @@ Use the following legend to interpret this table:
 |Hadoop-2.7.0 | X | X | X | X
 |Hadoop-2.7.1+ | NT | S | S | S
 |Hadoop-2.8.0 | X | X | X | X
+|Hadoop-2.8.1 | X | X | X | X
 |Hadoop-3.0.0-alphax | NT | NT | NT | NT
 |===
 
@@ -250,7 +251,7 @@ Hadoop version 2.7.0 is not tested or supported as the Hadoop PMC has explicitly
 .Hadoop 2.8.x
 [TIP]
 ====
-Hadoop version 2.8.0 is not tested or supported as the Hadoop PMC has explicitly labeled that release as not being stable. (reference the link:https://s.apache.org/hadoop-2.8.0-announcement[announcement of Apache Hadoop 2.8.0].)
+Hadoop versions 2.8.0 and 2.8.1 are not tested or supported as the Hadoop PMC has explicitly labeled those releases as not being stable. (reference the link:https://s.apache.org/hadoop-2.8.0-announcement[announcement of Apache Hadoop 2.8.0] and link:https://s.apache.org/hadoop-2.8.1-announcement[announcement of Apache Hadoop 2.8.1].)
 ====
 
 .Replace the Hadoop Bundled With HBase!
@@ -356,7 +357,7 @@ Distributed mode can be subdivided into distributed but all daemons run on a sin
 The _pseudo-distributed_ vs. _fully-distributed_ nomenclature comes from Hadoop.
 
 Pseudo-distributed mode can run against the local filesystem or it can run against an instance of the _Hadoop Distributed File System_ (HDFS). Fully-distributed mode can ONLY run on HDFS.
-See the Hadoop link:http://hadoop.apache.org/docs/current/[documentation] for how to set up HDFS.
+See the Hadoop link:https://hadoop.apache.org/docs/current/[documentation] for how to set up HDFS.
 A good walk-through for setting up HDFS on Hadoop 2 can be found at http://www.alexjf.net/blog/distributed-systems/hadoop-yarn-installation-definitive-guide.
 
 [[pseudo]]
@@ -546,19 +547,14 @@ Usually this ensemble location is kept out in the _hbase-site.xml_ and is picked
 
 If you are configuring an IDE to run an HBase client, you should include the _conf/_ directory on your classpath so _hbase-site.xml_ settings can be found (or add _src/test/resources_ to pick up the hbase-site.xml used by tests).
 
-Minimally, an HBase client needs several libraries in its `CLASSPATH` when connecting to a cluster, including:
-[source]
+Minimally, an HBase client needs hbase-client module in its dependencies when connecting to a cluster:
+[source,xml]
 ----
-
-commons-configuration (commons-configuration-1.6.jar)
-commons-lang (commons-lang-2.5.jar)
-commons-logging (commons-logging-1.1.1.jar)
-hadoop-core (hadoop-core-1.0.0.jar)
-hbase (hbase-0.92.0.jar)
-log4j (log4j-1.2.16.jar)
-slf4j-api (slf4j-api-1.5.8.jar)
-slf4j-log4j (slf4j-log4j12-1.5.8.jar)
-zookeeper (zookeeper-3.4.2.jar)
+<dependency>
+  <groupId>org.apache.hbase</groupId>
+  <artifactId>hbase-client</artifactId>
+  <version>1.2.4</version>
+</dependency>
 ----
 
 A basic example _hbase-site.xml_ for client only may look as follows:
@@ -579,7 +575,7 @@ A basic example _hbase-site.xml_ for client only may look as follows:
 [[java.client.config]]
 ==== Java client configuration
 
-The configuration used by a Java client is kept in an link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HBaseConfiguration[HBaseConfiguration] instance.
+The configuration used by a Java client is kept in an link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HBaseConfiguration[HBaseConfiguration] instance.
 
 The factory method on HBaseConfiguration, `HBaseConfiguration.create();`, on invocation, will read in the content of the first _hbase-site.xml_ found on the client's `CLASSPATH`, if one is present (Invocation will also factor in any _hbase-default.xml_ found; an _hbase-default.xml_ ships inside the _hbase.X.X.X.jar_). It is also possible to specify configuration directly without having to read from a _hbase-site.xml_.
 For example, to set the ZooKeeper ensemble for the cluster programmatically do as follows:
@@ -590,7 +586,7 @@ Configuration config = HBaseConfiguration.create();
 config.set("hbase.zookeeper.quorum", "localhost");  // Here we are running zookeeper locally
 ----
 
-If multiple ZooKeeper instances make up your ZooKeeper ensemble, they may be specified in a comma-separated list (just as in the _hbase-site.xml_ file). This populated `Configuration` instance can then be passed to an link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table], and so on.
+If multiple ZooKeeper instances make up your ZooKeeper ensemble, they may be specified in a comma-separated list (just as in the _hbase-site.xml_ file). This populated `Configuration` instance can then be passed to an link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table], and so on.
 
 [[example_config]]
 == Example Configurations
@@ -788,7 +784,7 @@ Manual splitting can mitigate region creation and movement under load.
 It also makes it so region boundaries are known and invariant (if you disable region splitting). If you use manual splits, it is easier doing staggered, time-based major compactions to spread out your network IO load.
 
 .Disable Automatic Splitting
-To disable automatic splitting, set `hbase.hregion.max.filesize` to a very large value, such as `100 GB` It is not recommended to set it to its absolute maximum value of `Long.MAX_VALUE`.
+To disable automatic splitting, you can set the region split policy in either the cluster configuration or the table configuration to `org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy`.
 
 .Automatic Splitting Is Recommended
 [NOTE]
@@ -824,7 +820,7 @@ See the entry for `hbase.hregion.majorcompaction` in the <<compaction.parameters
 ====
 Major compactions are absolutely necessary for StoreFile clean-up.
 Do not disable them altogether.
-You can run major compactions manually via the HBase shell or via the http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact(org.apache.hadoop.hbase.TableName)[Admin API].
+You can run major compactions manually via the HBase shell or via the link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact-org.apache.hadoop.hbase.TableName-[Admin API].
 ====
 
 For more information about compactions and the compaction file selection process, see <<compaction,compaction>>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/cp.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/cp.adoc b/src/main/asciidoc/_chapters/cp.adoc
index 2f5267f..abe334c 100644
--- a/src/main/asciidoc/_chapters/cp.adoc
+++ b/src/main/asciidoc/_chapters/cp.adoc
@@ -61,7 +61,7 @@ coprocessor can severely degrade cluster performance and stability.
 
 In HBase, you fetch data using a `Get` or `Scan`, whereas in an RDBMS you use a SQL
 query. In order to fetch only the relevant data, you filter it using a HBase
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/Filter.html[Filter]
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/Filter.html[Filter]
 , whereas in an RDBMS you use a `WHERE` predicate.
 
 After fetching the data, you perform computations on it. This paradigm works well
@@ -121,8 +121,8 @@ package.
 
 Observer coprocessors are triggered either before or after a specific event occurs.
 Observers that happen before an event use methods that start with a `pre` prefix,
-such as link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html#prePut%28org.apache.hadoop.hbase.coprocessor.ObserverContext,%20org.apache.hadoop.hbase.client.Put,%20org.apache.hadoop.hbase.regionserver.wal.WALEdit,%20org.apache.hadoop.hbase.client.Durability%29[`prePut`]. Observers that happen just after an event override methods that start
-with a `post` prefix, such as link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html#postPut%28org.apache.hadoop.hbase.coprocessor.ObserverContext,%20org.apache.hadoop.hbase.client.Put,%20org.apache.hadoop.hbase.regionserver.wal.WALEdit,%20org.apache.hadoop.hbase.client.Durability%29[`postPut`].
+such as link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html#prePut-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.Put-org.apache.hadoop.hbase.wal.WALEdit-org.apache.hadoop.hbase.client.Durability-[`prePut`]. Observers that happen just after an event override methods that start
+with a `post` prefix, such as link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html#postPut-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.Put-org.apache.hadoop.hbase.wal.WALEdit-org.apache.hadoop.hbase.client.Durability-[`postPut`].
 
 
 ==== Use Cases for Observer Coprocessors
@@ -139,7 +139,7 @@ Referential Integrity::
 
 Secondary Indexes::
   You can use a coprocessor to maintain secondary indexes. For more information, see
-  link:http://wiki.apache.org/hadoop/Hbase/SecondaryIndexing[SecondaryIndexing].
+  link:https://wiki.apache.org/hadoop/Hbase/SecondaryIndexing[SecondaryIndexing].
 
 
 ==== Types of Observer Coprocessor
@@ -163,7 +163,7 @@ MasterObserver::
 WalObserver::
   A WalObserver allows you to observe events related to writes to the Write-Ahead
   Log (WAL). See
-  link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/WALObserver.html[WALObserver].
+  link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/WALObserver.html[WALObserver].
 
 <<cp_example,Examples>> provides working examples of observer coprocessors.
 
@@ -178,7 +178,7 @@ average or summation for an entire table which spans hundreds of regions.
 
 In contrast to observer coprocessors, where your code is run transparently, endpoint
 coprocessors must be explicitly invoked using the
-link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#coprocessorService%28java.lang.Class,%20byte%5B%5D,%20byte%5B%5D,%20org.apache.hadoop.hbase.client.coprocessor.Batch.Call%29[CoprocessorService()]
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#coprocessorService-java.lang.Class-byte:A-byte:A-org.apache.hadoop.hbase.client.coprocessor.Batch.Call-[CoprocessorService()]
 method available in
 link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html[Table]
 or

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/datamodel.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/datamodel.adoc b/src/main/asciidoc/_chapters/datamodel.adoc
index da4143a..3674566 100644
--- a/src/main/asciidoc/_chapters/datamodel.adoc
+++ b/src/main/asciidoc/_chapters/datamodel.adoc
@@ -67,7 +67,7 @@ Timestamp::
 [[conceptual.view]]
 == Conceptual View
 
-You can read a very understandable explanation of the HBase data model in the blog post link:http://jimbojw.com/wiki/index.php?title=Understanding_Hbase_and_BigTable[Understanding HBase and BigTable] by Jim R. Wilson.
+You can read a very understandable explanation of the HBase data model in the blog post link:http://jimbojw.com/#understanding%20hbase[Understanding HBase and BigTable] by Jim R. Wilson.
 Another good explanation is available in the PDF link:http://0b4af6cdc2f0c5998459-c0245c5c937c5dedcca3f1764ecc9b2f.r43.cf2.rackcdn.com/9353-login1210_khurana.pdf[Introduction to Basic Schema Design] by Amandeep Khurana.
 
 It may help to read different perspectives to get a solid understanding of HBase schema design.
@@ -173,7 +173,7 @@ This abstraction lays the groundwork for upcoming multi-tenancy related features
 
 * Quota Management (link:https://issues.apache.org/jira/browse/HBASE-8410[HBASE-8410]) - Restrict the amount of resources (i.e. regions, tables) a namespace can consume.
 * Namespace Security Administration (link:https://issues.apache.org/jira/browse/HBASE-9206[HBASE-9206]) - Provide another level of security administration for tenants.
-* Region server groups (link:https://issues.apache.org/jira/browse/HBASE-6721[HBASE-6721]) - A namespace/table can be pinned onto a subset of RegionServers thus guaranteeing a course level of isolation.
+* Region server groups (link:https://issues.apache.org/jira/browse/HBASE-6721[HBASE-6721]) - A namespace/table can be pinned onto a subset of RegionServers thus guaranteeing a coarse level of isolation.
 
 [[namespace_creation]]
 === Namespace management
@@ -270,21 +270,21 @@ Cell content is uninterpreted bytes
 == Data Model Operations
 
 The four primary data model operations are Get, Put, Scan, and Delete.
-Operations are applied via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table] instances.
+Operations are applied via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table] instances.
 
 === Get
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] returns attributes for a specified row.
-Gets are executed via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#get(org.apache.hadoop.hbase.client.Get)[Table.get].
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] returns attributes for a specified row.
+Gets are executed via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#get-org.apache.hadoop.hbase.client.Get-[Table.get]
 
 === Put
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Put.html[Put] either adds new rows to a table (if the key is new) or can update existing rows (if the key already exists). Puts are executed via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#put(org.apache.hadoop.hbase.client.Put)[Table.put] (writeBuffer) or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#batch(java.util.List,%20java.lang.Object%5B%5D)[Table.batch] (non-writeBuffer).
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Put.html[Put] either adds new rows to a table (if the key is new) or can update existing rows (if the key already exists). Puts are executed via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#put-org.apache.hadoop.hbase.client.Put-[Table.put] (non-writeBuffer) or link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#batch-java.util.List-java.lang.Object:A-[Table.batch] (non-writeBuffer)
 
 [[scan]]
 === Scans
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] allow iteration over multiple rows for specified attributes.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] allows iteration over multiple rows for specified attributes.
 
 The following is an example of a Scan on a Table instance.
 Assume that a table is populated with rows with keys "row1", "row2", "row3", and then another set of rows with the keys "abc1", "abc2", and "abc3". The following example shows how to set a Scan instance to return the rows beginning with "row".
@@ -311,12 +311,12 @@ try {
 }
 ----
 
-Note that generally the easiest way to specify a specific stop point for a scan is by using the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/InclusiveStopFilter.html[InclusiveStopFilter] class.
+Note that generally the easiest way to specify a specific stop point for a scan is by using the link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/InclusiveStopFilter.html[InclusiveStopFilter] class.
 
 === Delete
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Delete.html[Delete] removes a row from a table.
-Deletes are executed via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#delete(org.apache.hadoop.hbase.client.Delete)[Table.delete].
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Delete.html[Delete] removes a row from a table.
+Deletes are executed via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#delete-org.apache.hadoop.hbase.client.Delete-[Table.delete].
 
 HBase does not modify data in place, and so deletes are handled by creating new markers called _tombstones_.
 These tombstones, along with the dead values, are cleaned up on major compactions.
@@ -341,7 +341,7 @@ In particular:
 * It is OK to write cells in a non-increasing version order.
 
 Below we describe how the version dimension in HBase currently works.
-See link:https://issues.apache.org/jira/browse/HBASE-2406[HBASE-2406] for discussion of HBase versions. link:http://outerthought.org/blog/417-ot.html[Bending time in HBase] makes for a good read on the version, or time, dimension in HBase.
+See link:https://issues.apache.org/jira/browse/HBASE-2406[HBASE-2406] for discussion of HBase versions. link:https://www.ngdata.com/bending-time-in-hbase/[Bending time in HBase] makes for a good read on the version, or time, dimension in HBase.
 It has more detail on versioning than is provided here.
 As of this writing, the limitation _Overwriting values at existing timestamps_ mentioned in the article no longer holds in HBase.
 This section is basically a synopsis of this article by Bruno Dumon.
@@ -355,7 +355,7 @@ Prior to HBase 0.96, the default number of versions kept was `3`, but in 0.96 an
 .Modify the Maximum Number of Versions for a Column Family
 ====
 This example uses HBase Shell to keep a maximum of 5 versions of all columns in column family `f1`.
-You could also use link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
+You could also use link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
 
 ----
 hbase> alter ‘t1′, NAME => ‘f1′, VERSIONS => 5
@@ -367,7 +367,7 @@ hbase> alter ‘t1′, NAME => ‘f1′, VERSIONS => 5
 You can also specify the minimum number of versions to store per column family.
 By default, this is set to 0, which means the feature is disabled.
 The following example sets the minimum number of versions on all columns in column family `f1` to `2`, via HBase Shell.
-You could also use link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
+You could also use link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
 
 ----
 hbase> alter ‘t1′, NAME => ‘f1′, MIN_VERSIONS => 2
@@ -385,12 +385,12 @@ In this section we look at the behavior of the version dimension for each of the
 ==== Get/Scan
 
 Gets are implemented on top of Scans.
-The below discussion of link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] applies equally to link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scans].
+The below discussion of link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] applies equally to link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scans].
 
 By default, i.e. if you specify no explicit version, when doing a `get`, the cell whose version has the largest value is returned (which may or may not be the latest one written, see later). The default behavior can be modified in the following ways:
 
-* to return more than one version, see link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html#setMaxVersions()[Get.setMaxVersions()]
-* to return versions other than the latest, see link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html#setTimeRange(long,%20long)[Get.setTimeRange()]
+* to return more than one version, see link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html#setMaxVersions--[Get.setMaxVersions()]
+* to return versions other than the latest, see link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html#setTimeRange-long-long-[Get.setTimeRange()]
 +
 To retrieve the latest version that is less than or equal to a given value, thus giving the 'latest' state of the record at a certain point in time, just use a range from 0 to the desired version and set the max versions to 1.
 
@@ -525,7 +525,7 @@ _...create three cell versions at t1, t2 and t3, with a maximum-versions
     setting of 2. So when getting all versions, only the values at t2 and t3 will be
     returned. But if you delete the version at t2 or t3, the one at t1 will appear again.
     Obviously, once a major compaction has run, such behavior will not be the case
-    anymore..._ (See _Garbage Collection_ in link:http://outerthought.org/blog/417-ot.html[Bending time in HBase].)
+    anymore..._ (See _Garbage Collection_ in link:https://www.ngdata.com/bending-time-in-hbase/[Bending time in HBase].)
 
 [[dm.sort]]
 == Sort Order


[5/6] hbase git commit: updating docs from master

Posted by nd...@apache.org.
updating docs from master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2e9a55be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2e9a55be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2e9a55be

Branch: refs/heads/branch-1.1
Commit: 2e9a55befc308b4892ea5a083412e4f36178ed1a
Parents: b6ff374
Author: Nick Dimiduk <nd...@apache.org>
Authored: Thu Nov 30 19:53:20 2017 -0800
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Thu Nov 30 19:53:20 2017 -0800

----------------------------------------------------------------------
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |   1 +
 .../appendix_contributing_to_documentation.adoc |  42 +-
 src/main/asciidoc/_chapters/architecture.adoc   | 269 ++++--
 src/main/asciidoc/_chapters/asf.adoc            |   4 +-
 src/main/asciidoc/_chapters/backup_restore.adoc | 912 +++++++++++++++++++
 src/main/asciidoc/_chapters/community.adoc      |   6 +-
 src/main/asciidoc/_chapters/compression.adoc    |  10 +-
 src/main/asciidoc/_chapters/configuration.adoc  |  40 +-
 src/main/asciidoc/_chapters/cp.adoc             |  12 +-
 src/main/asciidoc/_chapters/datamodel.adoc      |  34 +-
 src/main/asciidoc/_chapters/developer.adoc      | 351 ++++---
 src/main/asciidoc/_chapters/external_apis.adoc  |  27 +-
 src/main/asciidoc/_chapters/faq.adoc            |   4 +-
 .../asciidoc/_chapters/getting_started.adoc     |   6 +-
 src/main/asciidoc/_chapters/hbase-default.adoc  |  52 +-
 src/main/asciidoc/_chapters/hbase_apis.adoc     |   2 +-
 src/main/asciidoc/_chapters/mapreduce.adoc      | 112 ++-
 src/main/asciidoc/_chapters/ops_mgt.adoc        | 159 +++-
 src/main/asciidoc/_chapters/other_info.adoc     |  14 +-
 src/main/asciidoc/_chapters/performance.adoc    |  41 +-
 src/main/asciidoc/_chapters/preface.adoc        |   4 +-
 src/main/asciidoc/_chapters/protobuf.adoc       |   2 +-
 src/main/asciidoc/_chapters/rpc.adoc            |   2 +-
 src/main/asciidoc/_chapters/schema_design.adoc  |  41 +-
 src/main/asciidoc/_chapters/security.adoc       |  12 +-
 src/main/asciidoc/_chapters/spark.adoc          |   4 +-
 src/main/asciidoc/_chapters/sql.adoc            |   4 +-
 .../_chapters/thrift_filter_language.adoc       |   2 +-
 src/main/asciidoc/_chapters/tracing.adoc        |   8 +-
 .../asciidoc/_chapters/troubleshooting.adoc     |  19 +-
 src/main/asciidoc/_chapters/unit_testing.adoc   |   8 +-
 src/main/asciidoc/_chapters/upgrading.adoc      | 166 +++-
 src/main/asciidoc/_chapters/zookeeper.adoc      |   8 +-
 src/main/asciidoc/book.adoc                     |   9 +-
 34 files changed, 1855 insertions(+), 532 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index 1d7c748..0c99b1f 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -123,6 +123,7 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 |        | getReplicationPeerConfig | superuser\|global(A)
 |        | updateReplicationPeerConfig | superuser\|global(A)
 |        | listReplicationPeers | superuser\|global(A)
+|        | getClusterStatus | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 |        | closeRegion | superuser\|global(A)
 |        | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc b/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
index 0337182..a603c16 100644
--- a/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
+++ b/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
@@ -35,9 +35,9 @@ including the documentation.
 
 In HBase, documentation includes the following areas, and probably some others:
 
-* The link:http://hbase.apache.org/book.html[HBase Reference
+* The link:https://hbase.apache.org/book.html[HBase Reference
   Guide] (this book)
-* The link:http://hbase.apache.org/[HBase website]
+* The link:https://hbase.apache.org/[HBase website]
 * API documentation
 * Command-line utility output and help text
 * Web UI strings, explicit help text, context-sensitive strings, and others
@@ -119,14 +119,14 @@ JIRA and add a version number to the name of the new patch.
 
 === Editing the HBase Website
 
-The source for the HBase website is in the HBase source, in the _src/main/site/_ directory.
+The source for the HBase website is in the HBase source, in the _src/site/_ directory.
 Within this directory, source for the individual pages is in the _xdocs/_ directory,
 and images referenced in those pages are in the _resources/images/_ directory.
 This directory also stores images used in the HBase Reference Guide.
 
 The website's pages are written in an HTML-like XML dialect called xdoc, which
 has a reference guide at
-http://maven.apache.org/archives/maven-1.x/plugins/xdoc/reference/xdocs.html.
+https://maven.apache.org/archives/maven-1.x/plugins/xdoc/reference/xdocs.html.
 You can edit these files in a plain-text editor, an IDE, or an XML editor such
 as XML Mind XML Editor (XXE) or Oxygen XML Author.
 
@@ -138,23 +138,23 @@ When you are satisfied with your changes, follow the procedure in
 [[website_publish]]
 === Publishing the HBase Website and Documentation
 
-HBase uses the ASF's `gitpubsub` mechanism.
-. After generating the website and documentation
-artifacts using `mvn clean site site:stage`, check out the `asf-site` repository.
+HBase uses the ASF's `gitpubsub` mechanism. A Jenkins job runs the
+`dev-support/jenkins-scripts/generate-hbase-website.sh` script, which runs the
+`mvn clean site site:stage` against the `master` branch of the `hbase`
+repository and commits the built artifacts to the `asf-site` branch of the
+`hbase-site` repository. When the commit is pushed, the website is redeployed
+automatically. If the script encounters an error, an email is sent to the
+developer mailing list. You can run the script manually or examine it to see the
+steps involved.
 
-. Remove previously-generated content using the following command:
-+
-----
-rm -rf rm -rf *apidocs* *book* *.html *.pdf* css js
-----
-+
-WARNING: Do not remove the `0.94/` directory. To regenerate them, you must check out
-the 0.94 branch and run `mvn clean site site:stage` from there, and then copy the
-artifacts to the 0.94/ directory of the `asf-site` branch.
-
-. Copy the contents of `target/staging` to the branch.
+[[website_check_links]]
+=== Checking the HBase Website for Broken Links
 
-. Add and commit your changes, and submit a patch for review.
+A Jenkins job runs periodically to check HBase website for broken links, using
+the `dev-support/jenkins-scripts/check-website-links.sh` script. This script
+uses a tool called `linklint` to check for bad links and create a report. If
+broken links are found, an email is sent to the developer mailing list. You can
+run the script manually or examine it to see the steps involved.
 
 === HBase Reference Guide Style Guide and Cheat Sheet
 
@@ -216,7 +216,7 @@ link:http://www.google.com[Google]
 ----
 image::sunset.jpg[Alt Text]
 ----
-(put the image in the src/main/site/resources/images directory)
+(put the image in the src/site/resources/images directory)
 | An inline image | The image with alt text, as part of the text flow |
 ----
 image:sunset.jpg [Alt Text]
@@ -389,7 +389,7 @@ Inline images cannot have titles. They are generally small images like GUI butto
 image:sunset.jpg[Alt Text]
 ----
 
-When doing a local build, save the image to the _src/main/site/resources/images/_ directory.
+When doing a local build, save the image to the _src/site/resources/images/_ directory.
 When you link to the image, do not include the directory portion of the path.
 The image will be copied to the appropriate target location during the build of the output.
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/architecture.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index 930fa60..9a3cbd9 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -76,7 +76,7 @@ HBase can run quite well stand-alone on a laptop - but this should be considered
 [[arch.overview.hbasehdfs]]
 === What Is The Difference Between HBase and Hadoop/HDFS?
 
-link:http://hadoop.apache.org/hdfs/[HDFS] is a distributed file system that is well suited for the storage of large files.
+link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html[HDFS] is a distributed file system that is well suited for the storage of large files.
 Its documentation states that it is not, however, a general purpose file system, and does not provide fast individual record lookups in files.
 HBase, on the other hand, is built on top of HDFS and provides fast record lookups (and updates) for large tables.
 This can sometimes be a point of conceptual confusion.
@@ -88,31 +88,10 @@ See the <<datamodel>> and the rest of this chapter for more information on how H
 
 The catalog table `hbase:meta` exists as an HBase table and is filtered out of the HBase shell's `list` command, but is in fact a table just like any other.
 
-[[arch.catalog.root]]
-=== -ROOT-
-
-NOTE: The `-ROOT-` table was removed in HBase 0.96.0.
-Information here should be considered historical.
-
-The `-ROOT-` table kept track of the location of the `.META` table (the previous name for the table now called `hbase:meta`) prior to HBase 0.96.
-The `-ROOT-` table structure was as follows:
-
-.Key
-
-* .META.
-  region key (`.META.,,1`)
-
-.Values
-
-* `info:regioninfo` (serialized link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HRegionInfo.html[HRegionInfo] instance of `hbase:meta`)
-* `info:server` (server:port of the RegionServer holding `hbase:meta`)
-* `info:serverstartcode` (start-time of the RegionServer process holding `hbase:meta`)
-
 [[arch.catalog.meta]]
 === hbase:meta
 
-The `hbase:meta` table (previously called `.META.`) keeps a list of all regions in the system.
-The location of `hbase:meta` was previously tracked within the `-ROOT-` table, but is now stored in ZooKeeper.
+The `hbase:meta` table (previously called `.META.`) keeps a list of all regions in the system, and the location of `hbase:meta` is stored in ZooKeeper.
 
 The `hbase:meta` table structure is as follows:
 
@@ -122,7 +101,7 @@ The `hbase:meta` table structure is as follows:
 
 .Values
 
-* `info:regioninfo` (serialized link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HRegionInfo.html[HRegionInfo] instance for this region)
+* `info:regioninfo` (serialized link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HRegionInfo.html[HRegionInfo] instance for this region)
 * `info:server` (server:port of the RegionServer containing this region)
 * `info:serverstartcode` (start-time of the RegionServer process containing this region)
 
@@ -140,9 +119,7 @@ If a region has both an empty start and an empty end key, it is the only region
 ====
 
 In the (hopefully unlikely) event that programmatic processing of catalog metadata
-is required, see the
-+++<a href="http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/Writables.html#getHRegionInfo%28byte%5B%5D%29">Writables</a>+++
-utility.
+is required, see the link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html#parseFrom-byte:A-[RegionInfo.parseFrom] utility.
 
 [[arch.catalog.startup]]
 === Startup Sequencing
@@ -164,7 +141,7 @@ Should a region be reassigned either by the master load balancer or because a Re
 
 See <<master.runtime>> for more information about the impact of the Master on HBase Client communication.
 
-Administrative functions are done via an instance of link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html[Admin]
+Administrative functions are done via an instance of link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html[Admin]
 
 [[client.connections]]
 === Cluster Connections
@@ -180,12 +157,12 @@ Finally, be sure to cleanup your `Connection` instance before exiting.
 `Connections` are heavyweight objects but thread-safe so you can create one for your application and keep the instance around.
 `Table`, `Admin` and `RegionLocator` instances are lightweight.
 Create as you go and then let go as soon as you are done by closing them.
-See the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/package-summary.html[Client Package Javadoc Description] for example usage of the new HBase 1.0 API.
+See the link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/package-summary.html[Client Package Javadoc Description] for example usage of the new HBase 1.0 API.
 
 ==== API before HBase 1.0.0
 
-Instances of `HTable` are the way to interact with an HBase cluster earlier than 1.0.0. _link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table] instances are not thread-safe_. Only one thread can use an instance of Table at any given time.
-When creating Table instances, it is advisable to use the same link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HBaseConfiguration[HBaseConfiguration] instance.
+Instances of `HTable` are the way to interact with an HBase cluster earlier than 1.0.0. _link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table] instances are not thread-safe_. Only one thread can use an instance of Table at any given time.
+When creating Table instances, it is advisable to use the same link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HBaseConfiguration[HBaseConfiguration] instance.
 This will ensure sharing of ZooKeeper and socket instances to the RegionServers which is usually what you want.
 For example, this is preferred:
 
@@ -206,7 +183,7 @@ HBaseConfiguration conf2 = HBaseConfiguration.create();
 HTable table2 = new HTable(conf2, "myTable");
 ----
 
-For more information about how connections are handled in the HBase client, see link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html[ConnectionFactory].
+For more information about how connections are handled in the HBase client, see link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html[ConnectionFactory].
 
 [[client.connection.pooling]]
 ===== Connection Pooling
@@ -230,19 +207,19 @@ try (Connection connection = ConnectionFactory.createConnection(conf);
 [WARNING]
 ====
 Previous versions of this guide discussed `HTablePool`, which was deprecated in HBase 0.94, 0.95, and 0.96, and removed in 0.98.1, by link:https://issues.apache.org/jira/browse/HBASE-6580[HBASE-6580], or `HConnection`, which is deprecated in HBase 1.0 by `Connection`.
-Please use link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Connection.html[Connection] instead.
+Please use link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Connection.html[Connection] instead.
 ====
 
 [[client.writebuffer]]
 === WriteBuffer and Batch Methods
 
-In HBase 1.0 and later, link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTable.html[HTable] is deprecated in favor of link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table]. `Table` does not use autoflush. To do buffered writes, use the BufferedMutator class.
+In HBase 1.0 and later, link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTable.html[HTable] is deprecated in favor of link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table]. `Table` does not use autoflush. To do buffered writes, use the BufferedMutator class.
 
-Before a `Table` or `HTable` instance is discarded, invoke either `close()` or `flushCommits()`, so `Put`s will not be lost.
+In HBase 2.0 and later, link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTable.html[HTable] does not use BufferedMutator to execute the ``Put`` operation. Refer to link:https://issues.apache.org/jira/browse/HBASE-18500[HBASE-18500] for more information.
 
 For additional information on write durability, review the link:/acid-semantics.html[ACID semantics] page.
 
-For fine-grained control of batching of ``Put``s or ``Delete``s, see the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#batch%28java.util.List%29[batch] methods on Table.
+For fine-grained control of batching of ``Put``s or ``Delete``s, see the link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html#batch-java.util.List-java.lang.Object:A-[batch] methods on Table.
 
 [[async.client]]
 === Asynchronous Client ===
@@ -259,11 +236,11 @@ There are several differences for scan:
 * There is a `scanAll` method which will return all the results at once. It aims to provide a simpler way for small scans which you want to get the whole results at once usually.
 * The Observer Pattern. There is a scan method which accepts a `ScanResultConsumer` as a parameter. It will pass the results to the consumer.
 
-Notice that there are two types of asynchronous table, one is `AsyncTable` and the other is `RawAsyncTable`.
+Notice that the `AsyncTable` interface is templatized. The template parameter specifies the type of `ScanResultConsumerBase` used by scans, which means the observer-style scan APIs differ. The two types of scan consumers are `ScanResultConsumer` and `AdvancedScanResultConsumer`.
 
-For `AsyncTable`, you need to provide a thread pool when getting it. The callbacks registered to the returned CompletableFuture will be executed in that thread pool. It is designed for normal users. You are free to do anything in the callbacks.
+`ScanResultConsumer` needs a separate thread pool which is used to execute the callbacks registered to the returned CompletableFuture. Because the use of a separate thread pool frees up RPC threads, callbacks are free to do anything. Use this if the callbacks are not quick, or when in doubt.
 
-For `RawAsyncTable`, all the callbacks are executed inside the framework thread so it is not allowed to do time consuming works in the callbacks otherwise you may block the framework thread and cause very bad performance impact. It is designed for advanced users who want to write high performance code. You can see the `org.apache.hadoop.hbase.client.example.HttpProxyExample` to see how to write fully asynchronous code with `RawAsyncTable`. And coprocessor related methods are only in `RawAsyncTable`.
+`AdvancedScanResultConsumer` executes callbacks inside the framework thread. It is not allowed to do time-consuming work in the callbacks, or it will likely block the framework threads and cause a very bad performance impact. As its name implies, it is designed for advanced users who want to write high performance code. See `org.apache.hadoop.hbase.client.example.HttpProxyExample` for how to write fully asynchronous code with it.
 
 [[async.admin]]
 === Asynchronous Admin ===
@@ -286,7 +263,7 @@ Information on non-Java clients and custom protocols is covered in <<external_ap
 [[client.filter]]
 == Client Request Filters
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] and link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] instances can be optionally configured with link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/Filter.html[filters] which are applied on the RegionServer.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Get.html[Get] and link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] instances can be optionally configured with link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/Filter.html[filters] which are applied on the RegionServer.
 
 Filters can be confusing because there are many different types, and it is best to approach them by understanding the groups of Filter functionality.
 
@@ -298,7 +275,7 @@ Structural Filters contain other Filters.
 [[client.filter.structural.fl]]
 ==== FilterList
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FilterList.html[FilterList] represents a list of Filters with a relationship of `FilterList.Operator.MUST_PASS_ALL` or `FilterList.Operator.MUST_PASS_ONE` between the Filters.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FilterList.html[FilterList] represents a list of Filters with a relationship of `FilterList.Operator.MUST_PASS_ALL` or `FilterList.Operator.MUST_PASS_ONE` between the Filters.
 The following example shows an 'or' between two Filters (checking for either 'my value' or 'my other value' on the same attribute).
 
 [source,java]
@@ -307,14 +284,14 @@ FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ONE);
 SingleColumnValueFilter filter1 = new SingleColumnValueFilter(
   cf,
   column,
-  CompareOp.EQUAL,
+  CompareOperator.EQUAL,
   Bytes.toBytes("my value")
   );
 list.add(filter1);
 SingleColumnValueFilter filter2 = new SingleColumnValueFilter(
   cf,
   column,
-  CompareOp.EQUAL,
+  CompareOperator.EQUAL,
   Bytes.toBytes("my other value")
   );
 list.add(filter2);
@@ -328,9 +305,9 @@ scan.setFilter(list);
 ==== SingleColumnValueFilter
 
 A SingleColumnValueFilter (see:
-http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html)
-can be used to test column values for equivalence (`CompareOp.EQUAL`),
-inequality (`CompareOp.NOT_EQUAL`), or ranges (e.g., `CompareOp.GREATER`). The following is an
+https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html)
+can be used to test column values for equivalence (`CompareOperator.EQUAL`),
+inequality (`CompareOperator.NOT_EQUAL`), or ranges (e.g., `CompareOperator.GREATER`). The following is an
 example of testing equivalence of a column to a String value "my value"...
 
 [source,java]
@@ -338,7 +315,7 @@ example of testing equivalence of a column to a String value "my value"...
 SingleColumnValueFilter filter = new SingleColumnValueFilter(
   cf,
   column,
-  CompareOp.EQUAL,
+  CompareOperator.EQUAL,
   Bytes.toBytes("my value")
   );
 scan.setFilter(filter);
@@ -353,7 +330,7 @@ These Comparators are used in concert with other Filters, such as <<client.filte
 [[client.filter.cvp.rcs]]
 ==== RegexStringComparator
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html[RegexStringComparator] supports regular expressions for value comparisons.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html[RegexStringComparator] supports regular expressions for value comparisons.
 
 [source,java]
 ----
@@ -361,7 +338,7 @@ RegexStringComparator comp = new RegexStringComparator("my.");   // any value th
 SingleColumnValueFilter filter = new SingleColumnValueFilter(
   cf,
   column,
-  CompareOp.EQUAL,
+  CompareOperator.EQUAL,
   comp
   );
 scan.setFilter(filter);
@@ -372,7 +349,7 @@ See the Oracle JavaDoc for link:http://download.oracle.com/javase/6/docs/api/jav
 [[client.filter.cvp.substringcomparator]]
 ==== SubstringComparator
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/SubstringComparator.html[SubstringComparator] can be used to determine if a given substring exists in a value.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/SubstringComparator.html[SubstringComparator] can be used to determine if a given substring exists in a value.
 The comparison is case-insensitive.
 
 [source,java]
@@ -382,7 +359,7 @@ SubstringComparator comp = new SubstringComparator("y val");   // looking for 'm
 SingleColumnValueFilter filter = new SingleColumnValueFilter(
   cf,
   column,
-  CompareOp.EQUAL,
+  CompareOperator.EQUAL,
   comp
   );
 scan.setFilter(filter);
@@ -391,12 +368,12 @@ scan.setFilter(filter);
 [[client.filter.cvp.bfp]]
 ==== BinaryPrefixComparator
 
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html[BinaryPrefixComparator].
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html[BinaryPrefixComparator].
 
 [[client.filter.cvp.bc]]
 ==== BinaryComparator
 
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html[BinaryComparator].
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html[BinaryComparator].
 
 [[client.filter.kvm]]
 === KeyValue Metadata
@@ -406,18 +383,18 @@ As HBase stores data internally as KeyValue pairs, KeyValue Metadata Filters eva
 [[client.filter.kvm.ff]]
 ==== FamilyFilter
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FamilyFilter.html[FamilyFilter] can be used to filter on the ColumnFamily.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FamilyFilter.html[FamilyFilter] can be used to filter on the ColumnFamily.
 It is generally a better idea to select ColumnFamilies in the Scan than to do it with a Filter.
 
 [[client.filter.kvm.qf]]
 ==== QualifierFilter
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/QualifierFilter.html[QualifierFilter] can be used to filter based on Column (aka Qualifier) name.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/QualifierFilter.html[QualifierFilter] can be used to filter based on Column (aka Qualifier) name.
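+
+Like the other comparison filters, it takes a compare operator and a comparator. The following is an illustrative sketch only; the qualifier name is a placeholder and `scan` is assumed to be an existing Scan.
+
+[source,java]
+----
+QualifierFilter qualifierFilter = new QualifierFilter(
+  CompareOperator.EQUAL,
+  new BinaryComparator(Bytes.toBytes("my_qualifier"))
+  );
+scan.setFilter(qualifierFilter);
+----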
 
 [[client.filter.kvm.cpf]]
 ==== ColumnPrefixFilter
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html[ColumnPrefixFilter] can be used to filter based on the lead portion of Column (aka Qualifier) names.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html[ColumnPrefixFilter] can be used to filter based on the lead portion of Column (aka Qualifier) names.
 
 A ColumnPrefixFilter seeks ahead to the first column matching the prefix in each row and for each involved column family.
 It can be used to efficiently get a subset of the columns in very wide rows.
@@ -450,7 +427,7 @@ rs.close();
 [[client.filter.kvm.mcpf]]
 ==== MultipleColumnPrefixFilter
 
-link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html[MultipleColumnPrefixFilter] behaves like ColumnPrefixFilter but allows specifying multiple prefixes.
+link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html[MultipleColumnPrefixFilter] behaves like ColumnPrefixFilter but allows specifying multiple prefixes.
 
 Like ColumnPrefixFilter, MultipleColumnPrefixFilter efficiently seeks ahead to the first column matching the lowest prefix and also seeks past ranges of columns between prefixes.
 It can be used to efficiently get discontinuous sets of columns from very wide rows.
@@ -480,7 +457,7 @@ rs.close();
 [[client.filter.kvm.crf]]
 ==== ColumnRangeFilter
 
-A link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/ColumnRangeFilter.html[ColumnRangeFilter] allows efficient intra row scanning.
+A link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/ColumnRangeFilter.html[ColumnRangeFilter] allows efficient intra row scanning.
 
 A ColumnRangeFilter can seek ahead to the first matching column for each involved column family.
 It can be used to efficiently get a 'slice' of the columns of a very wide row.
@@ -521,7 +498,7 @@ Note:  Introduced in HBase 0.92
 [[client.filter.row.rf]]
 ==== RowFilter
 
-It is generally a better idea to use the startRow/stopRow methods on Scan for row selection, however link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/RowFilter.html[RowFilter] can also be used.
+It is generally a better idea to use the startRow/stopRow methods on Scan for row selection; however, link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/RowFilter.html[RowFilter] can also be used.
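+
+For example, a contiguous range of row keys is better expressed on the Scan itself (via `withStartRow`/`withStopRow` in recent client versions), while a RowFilter suits pattern matching on the key. The sketch below is illustrative only; the row keys and regular expression are placeholders.
+
+[source,java]
+----
+// Preferred for contiguous ranges: bound the Scan itself.
+Scan scan = new Scan();
+scan.withStartRow(Bytes.toBytes("row-0100"));
+scan.withStopRow(Bytes.toBytes("row-0200"));
+
+// Pattern matching on the row key with a RowFilter.
+RowFilter rowFilter = new RowFilter(
+  CompareOperator.EQUAL,
+  new RegexStringComparator("row-01.*")
+  );
+scan.setFilter(rowFilter);
+----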
 
 [[client.filter.utility]]
 === Utility
@@ -530,7 +507,7 @@ It is generally a better idea to use the startRow/stopRow methods on Scan for ro
 ==== FirstKeyOnlyFilter
 
 This is primarily used for rowcount jobs.
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.html[FirstKeyOnlyFilter].
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.html[FirstKeyOnlyFilter].
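+
+The filter takes no arguments; as a small illustrative sketch, it is simply attached to the Scan so that only the first KeyValue of each row is returned.
+
+[source,java]
+----
+Scan scan = new Scan();
+scan.setFilter(new FirstKeyOnlyFilter());
+----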
 
 [[architecture.master]]
 == Master
@@ -580,7 +557,7 @@ See <<regions.arch.assignment>> for more information on region assignment.
 ==== CatalogJanitor
 
 Periodically checks and cleans up the `hbase:meta` table.
-See <arch.catalog.meta>> for more information on the meta table.
+See <<arch.catalog.meta>> for more information on the meta table.
 
 [[regionserver.arch]]
 == RegionServer
@@ -657,7 +634,7 @@ However, latencies tend to be less erratic across time, because there is less ga
 If the BucketCache is deployed in off-heap mode, this memory is not managed by the GC at all.
 This is why you'd use BucketCache, so your latencies are less erratic and to mitigate GCs and heap fragmentation.
 See Nick Dimiduk's link:http://www.n10k.com/blog/blockcache-101/[BlockCache 101] for comparisons running on-heap vs off-heap tests.
-Also see link:http://people.apache.org/~stack/bc/[Comparing BlockCache Deploys] which finds that if your dataset fits inside your LruBlockCache deploy, use it otherwise if you are experiencing cache churn (or you want your cache to exist beyond the vagaries of java GC), use BucketCache.
+Also see link:https://people.apache.org/~stack/bc/[Comparing BlockCache Deploys] which finds that if your dataset fits inside your LruBlockCache deploy, use it; otherwise, if you are experiencing cache churn (or you want your cache to exist beyond the vagaries of Java GC), use BucketCache.
 
 When you enable BucketCache, you are enabling a two tier caching system, an L1 cache which is implemented by an instance of LruBlockCache and an off-heap L2 cache which is implemented by BucketCache.
 Management of these two tiers and the policy that dictates how blocks move between them is done by `CombinedBlockCache`.
@@ -668,7 +645,7 @@ See <<offheap.blockcache>> for more detail on going off-heap.
 ==== General Cache Configurations
 
 Apart from the cache implementation itself, you can set some general configuration options to control how the cache performs.
-See http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html.
+See https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html.
 After setting any of these options, restart or rolling restart your cluster for the configuration to take effect.
 Check logs for errors or unexpected behavior.
 
@@ -730,9 +707,9 @@ Your data is not the only resident of the block cache.
 Here are others that you may have to take into account:
 
 Catalog Tables::
-  The `-ROOT-` (prior to HBase 0.96, see <<arch.catalog.root,arch.catalog.root>>) and `hbase:meta` tables are forced into the block cache and have the in-memory priority which means that they are harder to evict.
-  The former never uses more than a few hundred bytes while the latter can occupy a few MBs
-  (depending on the number of regions).
+  The `hbase:meta` table is forced into the block cache and has the in-memory priority, which means that it is harder to evict.
+
+NOTE: The `hbase:meta` table can occupy a few MBs depending on the number of regions.
 
 HFiles Indexes::
   An _HFile_ is the file format that HBase uses to store data in HDFS.
@@ -778,7 +755,7 @@ Since link:https://issues.apache.org/jira/browse/HBASE-4683[HBASE-4683 Always ca
 ===== How to Enable BucketCache
 
 The usual deploy of BucketCache is via a managing class that sets up two caching tiers: an L1 on-heap cache implemented by LruBlockCache and a second L2 cache implemented with BucketCache.
-The managing class is link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.html[CombinedBlockCache] by default.
+The managing class is link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.html[CombinedBlockCache] by default.
 The previous link describes the caching 'policy' implemented by CombinedBlockCache.
 In short, it works by keeping meta blocks -- INDEX and BLOOM in the L1, on-heap LruBlockCache tier -- and DATA blocks are kept in the L2, BucketCache tier.
 It is possible to amend this behavior in HBase since version 1.0 and ask that a column family have both its meta and DATA blocks hosted on-heap in the L1 tier by setting `cacheDataInL1` via `HColumnDescriptor.setCacheDataInL1(true)` or in the shell, creating or amending column families setting `CACHE_DATA_IN_L1` to true: e.g.
@@ -904,7 +881,7 @@ The compressed BlockCache is disabled by default. To enable it, set `hbase.block
 
 As write requests are handled by the region server, they accumulate in an in-memory storage system called the _memstore_. Once the memstore fills, its content are written to disk as additional store files. This event is called a _memstore flush_. As store files accumulate, the RegionServer will <<compaction,compact>> them into fewer, larger files. After each flush or compaction finishes, the amount of data stored in the region has changed. The RegionServer consults the region split policy to determine if the region has grown too large or should be split for another policy-specific reason. A region split request is enqueued if the policy recommends it.
 
-Logically, the process of splitting a region is simple. We find a suitable point in the keyspace of the region where we should divide the region in half, then split the region's data into two new regions at that point. The details of the process however are not simple.  When a split happens, the newly created _daughter regions_ do not rewrite all the data into new files immediately. Instead, they create small files similar to symbolic link files, named link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/Reference.html[Reference files], which point to either the top or bottom part of the parent store file according to the split point. The reference file is used just like a regular data file, but only half of the records are considered. The region can only be split if there are no more references to the immutable data files of the parent region. Those reference files are cleaned gradually by compactions, so that the region will stop referring to its parents files, and can be split further.
+Logically, the process of splitting a region is simple. We find a suitable point in the keyspace of the region where we should divide the region in half, then split the region's data into two new regions at that point. The details of the process however are not simple.  When a split happens, the newly created _daughter regions_ do not rewrite all the data into new files immediately. Instead, they create small files similar to symbolic link files, named link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/Reference.html[Reference files], which point to either the top or bottom part of the parent store file according to the split point. The reference file is used just like a regular data file, but only half of the records are considered. The region can only be split if there are no more references to the immutable data files of the parent region. Those reference files are cleaned gradually by compactions, so that the region will stop referring to its parent's files, and can be split further.
 
 Although splitting the region is a local decision made by the RegionServer, the split process itself must coordinate with many actors. The RegionServer notifies the Master before and after the split, updates the `.META.` table so that clients can discover the new daughter regions, and rearranges the directory structure and data files in HDFS. Splitting is a multi-task process. To enable rollback in case of an error, the RegionServer keeps an in-memory journal about the execution state. The steps taken by the RegionServer to execute the split are illustrated in <<regionserver_split_process_image>>. Each step is labeled with its step number. Actions from RegionServers or Master are shown in red, while actions from the clients are shown in green.
 
@@ -938,7 +915,7 @@ Under normal operations, the WAL is not needed because data changes move from th
 However, if a RegionServer crashes or becomes unavailable before the MemStore is flushed, the WAL ensures that the changes to the data can be replayed.
 If writing to the WAL fails, the entire operation to modify the data fails.
 
-HBase uses an implementation of the link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/wal/WAL.html[WAL] interface.
+HBase uses an implementation of the link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/wal/WAL.html[WAL] interface.
 Usually, there is only one instance of a WAL per RegionServer.
 The RegionServer records Puts and Deletes to it, before recording them to the <<store.memstore>> for the affected <<store>>.
 
@@ -1191,30 +1168,21 @@ Due to an asynchronous implementation, in very rare cases, the split log manager
 For that reason, it periodically checks for remaining uncompleted tasks in its task map or ZooKeeper.
 If none are found, it throws an exception so that the log splitting can be retried right away instead of hanging there waiting for something that won't happen.
 
+[[wal.compression]]
+==== WAL Compression
 
-[[distributed.log.replay]]
-====== Distributed Log Replay
-
-After a RegionServer fails, its failed regions are assigned to another RegionServer, which are marked as "recovering" in ZooKeeper.
-A split log worker directly replays edits from the WAL of the failed RegionServer to the regions at its new location.
-When a region is in "recovering" state, it can accept writes but no reads (including Append and Increment), region splits or merges.
-
-Distributed Log Replay extends the <<distributed.log.splitting>> framework.
-It works by directly replaying WAL edits to another RegionServer instead of creating _recovered.edits_ files.
-It provides the following advantages over distributed log splitting alone:
-
-* It eliminates the overhead of writing and reading a large number of _recovered.edits_ files.
-  It is not unusual for thousands of _recovered.edits_ files to be created and written concurrently during a RegionServer recovery.
-  Many small random writes can degrade overall system performance.
-* It allows writes even when a region is in recovering state.
-  It only takes seconds for a recovering region to accept writes again.
+The content of the WAL can be compressed using LRU Dictionary compression.
+This can be used to speed up WAL replication to different datanodes.
+The dictionary can store up to 2^15^ elements; eviction starts after this number is exceeded.
 
-.Enabling Distributed Log Replay
-To enable distributed log replay, set `hbase.master.distributed.log.replay` to `true`.
-This will be the default for HBase 0.99 (link:https://issues.apache.org/jira/browse/HBASE-10888[HBASE-10888]).
+To enable WAL compression, set the `hbase.regionserver.wal.enablecompression` property to `true`.
+The default value for this property is `false`.
+By default, WAL tag compression is turned on when WAL compression is enabled.
+You can turn off WAL tag compression by setting the `hbase.regionserver.wal.tags.enablecompression` property to `false`.
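+
+These properties are RegionServer settings, normally placed in _hbase-site.xml_ and picked up after a (rolling) restart. The following sketch only illustrates the property keys programmatically, for example when preparing a test configuration:
+
+[source,java]
+----
+Configuration conf = HBaseConfiguration.create();
+// Enable WAL value compression; tag compression then defaults to on.
+conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
+// Optionally turn tag compression back off.
+conf.setBoolean("hbase.regionserver.wal.tags.enablecompression", false);
+----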
 
-You must also enable HFile version 3 (which is the default HFile format starting in HBase 0.99.
-See link:https://issues.apache.org/jira/browse/HBASE-10855[HBASE-10855]). Distributed log replay is unsafe for rolling upgrades.
+A possible downside to WAL compression is that we lose more data from the last block in the WAL if it is ill-terminated
+mid-write. If entries in this last block were added with new dictionary entries but we failed to persist the amended
+dictionary because of an abrupt termination, a read of this last block may not be able to resolve the last-written entries.
 
 [[wal.disable]]
 ==== Disabling the WAL
@@ -1396,12 +1364,12 @@ The HDFS client does the following by default when choosing locations to write r
 . Second replica is written to a random node on another rack
 . Third replica is written on the same rack as the second, but on a different node chosen randomly
 . Subsequent replicas are written on random nodes on the cluster.
-  See _Replica Placement: The First Baby Steps_ on this page: link:http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html[HDFS Architecture]
+  See _Replica Placement: The First Baby Steps_ on this page: link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html[HDFS Architecture]
 
 Thus, HBase eventually achieves locality for a region after a flush or a compaction.
 In a RegionServer failover situation a RegionServer may be assigned regions with non-local StoreFiles (because none of the replicas are local), however as new data is written in the region, or the table is compacted and StoreFiles are re-written, they will become "local" to the RegionServer.
 
-For more information, see _Replica Placement: The First Baby Steps_ on this page: link:http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html[HDFS Architecture] and also Lars George's blog on link:http://www.larsgeorge.com/2010/05/hbase-file-locality-in-hdfs.html[HBase and HDFS locality].
+For more information, see _Replica Placement: The First Baby Steps_ on this page: link:https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html[HDFS Architecture] and also Lars George's blog on link:http://www.larsgeorge.com/2010/05/hbase-file-locality-in-hdfs.html[HBase and HDFS locality].
 
 [[arch.region.splits]]
 === Region Splits
@@ -1416,9 +1384,9 @@ See <<disable.splitting>> for how to manually manage splits (and for why you mig
 
 ==== Custom Split Policies
 You can override the default split policy using a custom
-link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.html[RegionSplitPolicy](HBase 0.94+).
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.html[RegionSplitPolicy] (HBase 0.94+).
 Typically a custom split policy should extend HBase's default split policy:
-link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.html[IncreasingToUpperBoundRegionSplitPolicy].
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.html[IncreasingToUpperBoundRegionSplitPolicy].
 
 The policy can be set globally through the HBase configuration or on a per-table
 basis.
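+
+A minimal, illustrative sketch of such a policy follows; the class name and the quiet-period logic are hypothetical and only demonstrate the extension point:
+
+[source,java]
+----
+// Hypothetical policy: suppress splits for one hour after the region opens,
+// then defer to the parent policy's size-based heuristic.
+// Registered globally via hbase.regionserver.region.split.policy, or per table
+// via HTableDescriptor.setRegionSplitPolicyClassName().
+public class QuietPeriodSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy {
+  private static final long QUIET_PERIOD_MILLIS = 60 * 60 * 1000L;
+  private final long openedAt = System.currentTimeMillis();
+
+  @Override
+  protected boolean shouldSplit() {
+    if (System.currentTimeMillis() - openedAt < QUIET_PERIOD_MILLIS) {
+      return false;
+    }
+    return super.shouldSplit();
+  }
+}
+----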
@@ -1492,13 +1460,13 @@ Using a Custom Algorithm::
   As parameters, you give it the algorithm, desired number of regions, and column families.
   It includes two split algorithms.
   The first is the
-  `link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html[HexStringSplit]`
+  `link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html[HexStringSplit]`
   algorithm, which assumes the row keys are hexadecimal strings.
   The second,
-  `link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html[UniformSplit]`,
+  `link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html[UniformSplit]`,
   assumes the row keys are random byte arrays.
   You will probably need to develop your own
-  `link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html[SplitAlgorithm]`,
+  `link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html[SplitAlgorithm]`,
  using the provided ones as models; see the sketch below.
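+
+As a hedged illustration of pre-splitting from client code, the bundled HexStringSplit algorithm can generate split keys that are passed to `Admin.createTable()`; the table name, column family, region count, and the `admin` instance are placeholders.
+
+[source,java]
+----
+// Generate split keys for 16 regions with HexStringSplit, then pre-split
+// the table at creation time.
+byte[][] splits = new RegionSplitter.HexStringSplit().split(16);
+HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("pre_split_table"));
+desc.addFamily(new HColumnDescriptor("cf"));
+admin.createTable(desc, splits);
+----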
 
 === Online Region Merges
@@ -1574,7 +1542,7 @@ StoreFiles are where your data lives.
 
 ===== HFile Format
 
-The _HFile_ file format is based on the SSTable file described in the link:http://research.google.com/archive/bigtable.html[BigTable [2006]] paper and on Hadoop's link:http://hadoop.apache.org/common/docs/current/api/org/apache/hadoop/io/file/tfile/TFile.html[TFile] (The unit test suite and the compression harness were taken directly from TFile). Schubert Zhang's blog post on link:http://cloudepr.blogspot.com/2009/09/hfile-block-indexed-file-format-to.html[HFile: A Block-Indexed File Format to Store Sorted Key-Value Pairs] makes for a thorough introduction to HBase's HFile.
+The _HFile_ file format is based on the SSTable file described in the link:http://research.google.com/archive/bigtable.html[BigTable [2006]] paper and on Hadoop's link:https://hadoop.apache.org/common/docs/current/api/org/apache/hadoop/io/file/tfile/TFile.html[TFile] (The unit test suite and the compression harness were taken directly from TFile). Schubert Zhang's blog post on link:http://cloudepr.blogspot.com/2009/09/hfile-block-indexed-file-format-to.html[HFile: A Block-Indexed File Format to Store Sorted Key-Value Pairs] makes for a thorough introduction to HBase's HFile.
 Matteo Bertozzi has also put up a helpful description, link:http://th30z.blogspot.com/2011/02/hbase-io-hfile.html?spref=tw[HBase I/O: HFile].
 
 For more information, see the HFile source code.
@@ -2086,6 +2054,107 @@ Why?
 
 NOTE: This information is now included in the configuration parameter table in <<compaction.parameters>>.
 
+[[ops.date.tiered]]
+===== Date Tiered Compaction
+
+Date tiered compaction is a date-aware store file compaction strategy that is beneficial for time-range scans for time-series data.
+
+[[ops.date.tiered.when]]
+====== When To Use Date Tiered Compactions
+
+Consider using Date Tiered Compaction for reads over limited time ranges, especially scans of recent data.
+
+Don't use it for:
+
+* random gets without a limited time range
+* frequent deletes and updates
+* frequent out-of-order data writes creating long tails, especially writes with future timestamps
+* frequent bulk loads with heavily overlapping time ranges
+
+.Performance Improvements
+Performance testing has shown that the performance of time-range scans improves greatly for limited time ranges, especially scans of recent data.
+
+[[ops.date.tiered.enable]]
+====== Enabling Date Tiered Compaction
+
+You can enable Date Tiered compaction for a table or a column family by setting its `hbase.hstore.engine.class` to `org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine`.
+
+You also need to set `hbase.hstore.blockingStoreFiles` to a high number, such as 60, if using all default settings, rather than the default value of 12. Use 1.5 to 2 times the projected file count if changing the parameters. Projected file count = windows per tier x tier count + incoming window min + files older than max age.
+
+You also need to set `hbase.hstore.compaction.max` to the same value as `hbase.hstore.blockingStoreFiles` to unblock major compaction.
+
+.Procedure: Enable Date Tiered Compaction
+. Run one of following commands in the HBase shell.
+  Replace the table name `orders_table` with the name of your table.
++
+[source,sql]
+----
+alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', 'hbase.hstore.blockingStoreFiles' => '60', 'hbase.hstore.compaction.min'=>'2', 'hbase.hstore.compaction.max'=>'60'}
+alter 'orders_table', {NAME => 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', 'hbase.hstore.blockingStoreFiles' => '60', 'hbase.hstore.compaction.min'=>'2', 'hbase.hstore.compaction.max'=>'60'}}
+create 'orders_table', 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', 'hbase.hstore.blockingStoreFiles' => '60', 'hbase.hstore.compaction.min'=>'2', 'hbase.hstore.compaction.max'=>'60'}
+----
+
+. Configure other options if needed.
+  See <<ops.date.tiered.config>> for more information.
+
+.Procedure: Disable Date Tiered Compaction
+. Set the `hbase.hstore.engine.class` option to either nil or `org.apache.hadoop.hbase.regionserver.DefaultStoreEngine`.
+  Either option has the same effect.
+  Make sure you set the other options you changed to the original settings too.
++
+[source,sql]
+----
+alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DefaultStoreEngine', 'hbase.hstore.blockingStoreFiles' => '12', 'hbase.hstore.compaction.min'=>'6', 'hbase.hstore.compaction.max'=>'12'}
+----
+
+When you change the store engine either way, a major compaction will likely be performed on most regions.
+This is not necessary on new tables.
+
+[[ops.date.tiered.config]]
+====== Configuring Date Tiered Compaction
+
+Each of the settings for date tiered compaction should be configured at the table or column family level.
+If you use HBase shell, the general command pattern is as follows:
+
+[source,sql]
+----
+alter 'orders_table', CONFIGURATION => {'key' => 'value', ..., 'key' => 'value'}
+----
+
+[[ops.date.tiered.config.parameters]]
+.Tier Parameters
+
+You can configure your date tiers by changing the settings for the following parameters:
+
+.Date Tier Parameters
+[cols="1,1a", frame="all", options="header"]
+|===
+| Setting
+| Notes
+
+|`hbase.hstore.compaction.date.tiered.max.storefile.age.millis`
+|Files with max-timestamp smaller than this will no longer be compacted. Default at Long.MAX_VALUE.
+
+| `hbase.hstore.compaction.date.tiered.base.window.millis`
+| Base window size in milliseconds. Default at 6 hours.
+
+| `hbase.hstore.compaction.date.tiered.windows.per.tier`
+| Number of windows per tier. Default at 4.
+
+| `hbase.hstore.compaction.date.tiered.incoming.window.min`
+| Minimum number of files to compact in the incoming window. Set it to the expected number of files in the window to avoid wasteful compaction. Default at 6.
+
+| `hbase.hstore.compaction.date.tiered.window.policy.class`
+| The policy to select store files within the same time window. It does not apply to the incoming window. Default at exploring compaction, to avoid wasteful compaction.
+|===
+
+[[ops.date.tiered.config.compaction.throttler]]
+.Compaction Throttler
+
+With tiered compaction, all servers in the cluster will promote windows to higher tiers at the same time, so using a compaction throttle is recommended:
+Set `hbase.regionserver.throughput.controller` to `org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController`.
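+
+This is a RegionServer-level property, normally placed in _hbase-site.xml_; the snippet below is only an illustrative programmatic equivalent of the recommendation above.
+
+[source,java]
+----
+Configuration conf = HBaseConfiguration.create();
+conf.set("hbase.regionserver.throughput.controller",
+  "org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController");
+----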
+
+NOTE: For more information about date tiered compaction, please refer to the design specification at https://docs.google.com/document/d/1_AmlNb2N8Us1xICsTeGDLKIqL6T-oHoRLZ323MG_uy8
+
 [[ops.stripe]]
 ===== Experimental: Stripe Compactions
 
@@ -2299,7 +2368,7 @@ See the `LoadIncrementalHFiles` class for more information.
 
 As HBase runs on HDFS (and each StoreFile is written as a file on HDFS), it is important to have an understanding of the HDFS Architecture especially in terms of how it stores files, handles failovers, and replicates blocks.
 
-See the Hadoop documentation on link:http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html[HDFS Architecture] for more information.
+See the Hadoop documentation on link:https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html[HDFS Architecture] for more information.
 
 [[arch.hdfs.nn]]
 === NameNode
@@ -2703,7 +2772,7 @@ if (result.isStale()) {
 === Resources
 
 . More information about the design and implementation can be found at the jira issue: link:https://issues.apache.org/jira/browse/HBASE-10070[HBASE-10070]
-. HBaseCon 2014 link:http://hbasecon.com/sessions/#session15[talk] also contains some details and link:http://www.slideshare.net/enissoz/hbase-high-availability-for-reads-with-time[slides].
+. HBaseCon 2014 talk: link:https://hbase.apache.org/www.hbasecon.com/#2014-PresentationsRecordings[HBase Read High Availability Using Timeline-Consistent Region Replicas] also contains some details and link:http://www.slideshare.net/enissoz/hbase-high-availability-for-reads-with-time[slides].
 
 ifdef::backend-docbook[]
 [index]

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/asf.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/asf.adoc b/src/main/asciidoc/_chapters/asf.adoc
index 47c29e5..18cf95a 100644
--- a/src/main/asciidoc/_chapters/asf.adoc
+++ b/src/main/asciidoc/_chapters/asf.adoc
@@ -35,13 +35,13 @@ HBase is a project in the Apache Software Foundation and as such there are respo
 [[asf.devprocess]]
 === ASF Development Process
 
-See the link:http://www.apache.org/dev/#committers[Apache Development Process page]            for all sorts of information on how the ASF is structured (e.g., PMC, committers, contributors), to tips on contributing and getting involved, and how open-source works at ASF.
+See the link:https://www.apache.org/dev/#committers[Apache Development Process page]            for all sorts of information on how the ASF is structured (e.g., PMC, committers, contributors), to tips on contributing and getting involved, and how open-source works at ASF.
 
 [[asf.reporting]]
 === ASF Board Reporting
 
 Once a quarter, each project in the ASF portfolio submits a report to the ASF board.
 This is done by the HBase project lead and the committers.
-See link:http://www.apache.org/foundation/board/reporting[ASF board reporting] for more information.
+See link:https://www.apache.org/foundation/board/reporting[ASF board reporting] for more information.
 
 :numbered: