Posted to commits@hbase.apache.org by nd...@apache.org on 2015/05/12 00:50:52 UTC

hbase git commit: HBASE-13665 Fix docs and site building on branch-1

Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 68a6c2b91 -> d1b0dbee6


HBASE-13665 Fix docs and site building on branch-1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d1b0dbee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d1b0dbee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d1b0dbee

Branch: refs/heads/branch-1.0
Commit: d1b0dbee621463e429ff7893c5b81a119416ae2c
Parents: 68a6c2b
Author: Nick Dimiduk <nd...@apache.org>
Authored: Mon May 11 15:35:59 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Mon May 11 15:47:15 2015 -0700

----------------------------------------------------------------------
 pom.xml                                         |  66 +++++++++--
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |  94 ++++++++--------
 src/main/asciidoc/_chapters/architecture.adoc   |  14 +++
 src/main/asciidoc/_chapters/configuration.adoc  |  38 ++++---
 src/main/asciidoc/_chapters/developer.adoc      |  15 ++-
 .../asciidoc/_chapters/getting_started.adoc     |  10 +-
 src/main/asciidoc/_chapters/hbase_apis.adoc     | 109 +++++++++----------
 src/main/asciidoc/_chapters/mapreduce.adoc      |  27 +++--
 src/main/asciidoc/_chapters/upgrading.adoc      |   8 +-
 .../resources/images/hbase_logo_with_orca.png   | Bin 0 -> 11618 bytes
 .../resources/images/hbase_logo_with_orca.xcf   | Bin 0 -> 84265 bytes
 .../images/jumping-orca_transparent_rotated.xcf | Bin 0 -> 135399 bytes
 .../resources/images/region_split_process.png   | Bin 0 -> 338255 bytes
 src/main/site/site.xml                          |  37 ++++---
 src/main/site/xdoc/index.xml                    |  12 +-
 15 files changed, 265 insertions(+), 165 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index dd6cd09..3812165 100644
--- a/pom.xml
+++ b/pom.xml
@@ -988,25 +988,75 @@
       <plugin>
         <groupId>org.asciidoctor</groupId>
         <artifactId>asciidoctor-maven-plugin</artifactId>
-        <version>1.5.2</version> 
+        <version>1.5.2</version>
+        <inherited>false</inherited>
+        <dependencies>
+          <dependency>
+            <groupId>org.asciidoctor</groupId>
+            <artifactId>asciidoctorj-pdf</artifactId>
+            <version>1.5.0-alpha.6</version>
+          </dependency>
+        </dependencies>
+        <configuration>
+          <outputDirectory>target/site</outputDirectory>
+          <doctype>book</doctype>
+          <imagesDir>images</imagesDir>
+          <sourceHighlighter>coderay</sourceHighlighter>
+          <attributes>
+            <docVersion>${project.version}</docVersion>
+          </attributes>
+        </configuration>
         <executions>
           <execution>
-            <id>output-html</id> 
+            <id>output-html</id>
+            <phase>site</phase>
             <goals>
-              <goal>process-asciidoc</goal> 
+              <goal>process-asciidoc</goal>
             </goals>
-            <phase>site</phase>
             <configuration>
-              <imagesDir>./images</imagesDir>
-              <doctype>book</doctype>
               <attributes>
                 <stylesheet>hbase.css</stylesheet>
               </attributes>
               <backend>html5</backend>
-              <sourceHighlighter>coderay</sourceHighlighter>
-              <outputDirectory>target/site</outputDirectory>
             </configuration>
           </execution>
+          <execution>
+            <id>output-pdf</id>
+            <phase>site</phase>
+            <goals>
+              <goal>process-asciidoc</goal>
+            </goals>
+            <configuration>
+              <backend>pdf</backend>
+              <attributes>
+                <pagenums/>
+                <toc/>
+                <idprefix/>
+                <idseparator>-</idseparator>
+              </attributes>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <version>${maven.antrun.version}</version>
+        <inherited>false</inherited>
+        <!-- Rename the book.pdf generated by asciidoctor -->
+        <executions>
+          <execution>
+            <id>rename-pdf</id>
+            <phase>post-site</phase>
+            <configuration>
+              <target name="rename file">
+                <move file="${project.basedir}/target/site/book.pdf" tofile="${project.basedir}/target/site/apache_hbase_reference_guide.pdf" />
+                <move file="${project.basedir}/target/site/book.pdfmarks" tofile="${project.basedir}/target/site/apache_hbase_reference_guide.pdfmarks" />
+              </target>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
         </executions>
       </plugin>
       <plugin>

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index bf35c1a..cb285f3 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -81,77 +81,77 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 |===
 | Interface | Operation | Permissions
 | Master | createTable | superuser\|global\(C)\|NS\(C)
-|        | modifyTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)
-|        | deleteTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)
-|        | truncateTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)
-|        | addColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)
-|        | modifyColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)\|column(A)\|column\(C)
-|        | deleteColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)\|column(A)\|column\(C)
-|        | enableTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)
-|        | disableTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)
+|        | modifyTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
+|        | deleteTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
+|        | truncateTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
+|        | addColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
+|        | modifyColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)\|column(A)\|column\(C)
+|        | deleteColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)\|column(A)\|column\(C)
+|        | enableTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
+|        | disableTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
 |        | disableAclTable | Not allowed
-|        | move | superuser\|global(A)\|NS(A)\|Table(A)
-|        | assign | superuser\|global(A)\|NS(A)\|Table(A)
-|        | unassign | superuser\|global(A)\|NS(A)\|Table(A)
-|        | regionOffline | superuser\|global(A)\|NS(A)\|Table(A)
+|        | move | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
+|        | assign | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
+|        | unassign | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
+|        | regionOffline | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
 |        | balance | superuser\|global(A)
 |        | balanceSwitch | superuser\|global(A)
 |        | shutdown | superuser\|global(A)
 |        | stopMaster | superuser\|global(A)
-|        | snapshot | superuser\|global(A)\|NS(A)\|Table(A)
+|        | snapshot | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
 |        | listSnapshot | superuser\|global(A)\|SnapshotOwner
 |        | cloneSnapshot | superuser\|global(A)
-|        | restoreSnapshot | superuser\|global(A)\|SnapshotOwner & (NS(A)\|Table(A))
+|        | restoreSnapshot | superuser\|global(A)\|SnapshotOwner & (NS(A)\|TableOwner\|table(A))
 |        | deleteSnapshot | superuser\|global(A)\|SnapshotOwner
 |        | createNamespace | superuser\|global(A)
 |        | deleteNamespace | superuser\|global(A)
 |        | modifyNamespace | superuser\|global(A)
 |        | getNamespaceDescriptor | superuser\|global(A)\|NS(A)
 |        | listNamespaceDescriptors* | superuser\|global(A)\|NS(A)
-|        | flushTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS(\C)\|table(A)\|table\(C)
-|        | getTableDescriptors* | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)
-|        | getTableNames* | Any global or table perm
+|        | flushTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
+|        | getTableDescriptors* | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|TableOwner\|table(A)\|table\(C)
+|        | getTableNames* | superuser\|TableOwner\|Any global or table perm
 |        | setUserQuota(global level) | superuser\|global(A)
 |        | setUserQuota(namespace level) | superuser\|global(A)
-|        | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|Table(A)
-|        | setTableQuota | superuser\|global(A)\|NS(A)\|Table(A)
+|        | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
+|        | setTableQuota | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
 |        | setNamespaceQuota | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 |        | closeRegion | superuser\|global(A)
-|        | flush | superuser\|global(A)\|global\(C)\|table(A)\|table\(C)
-|        | split | superuser\|global(A)\|Table(A)
-|        | compact | superuser\|global(A)\|global\(C)\|table(A)\|table\(C)
-|        | getClosestRowBefore | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | getOp | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | exists | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | put | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | delete | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | batchMutate | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | checkAndPut | superuser\|global(RW)\|NS(RW)\|Table(RW)\|CF(RW)\|CQ(RW)
-|        | checkAndPutAfterRowLock | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | checkAndDelete   | superuser\|global(RW)\|NS(RW)\|Table(RW)\|CF(RW)\|CQ(RW)
-|        | checkAndDeleteAfterRowLock | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | incrementColumnValue | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | append | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | appendAfterRowLock | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | increment | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | incrementAfterRowLock | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W)
-|        | scannerOpen | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | scannerNext | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | scannerClose | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R)
-|        | bulkLoadHFile | superuser\|global\(C)\|table\(C)\|CF\(C)
-|        | prepareBulkLoad | superuser\|global\(C)\|table\(C)\|CF\(C)
-|        | cleanupBulkLoad | superuser\|global\(C)\|table\(C)\|CF\(C)
-| Endpoint | invoke | superuser\|global(X)\|NS(X)\|Table(X)
+|        | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)
+|        | split | superuser\|global(A)\|TableOwner\|table(A)
+|        | compact | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)
+|        | getClosestRowBefore | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | getOp | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | exists | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | put | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | delete | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | batchMutate | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | checkAndPut | superuser\|global(RW)\|NS(RW)\|TableOwner\|table(RW)\|CF(RW)\|CQ(RW)
+|        | checkAndPutAfterRowLock | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | checkAndDelete   | superuser\|global(RW)\|NS(RW)\|TableOwner\|table(RW)\|CF(RW)\|CQ(RW)
+|        | checkAndDeleteAfterRowLock | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | incrementColumnValue | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | append | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | appendAfterRowLock | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | increment | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | incrementAfterRowLock | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W)
+|        | scannerOpen | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | scannerNext | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | scannerClose | superuser\|global\(R)\|NS\(R)\|TableOwner\|table\(R)\|CF\(R)\|CQ\(R)
+|        | bulkLoadHFile | superuser\|global\(C)\|TableOwner\|table\(C)\|CF\(C)
+|        | prepareBulkLoad | superuser\|global\(C)\|TableOwner\|table\(C)\|CF\(C)
+|        | cleanupBulkLoad | superuser\|global\(C)\|TableOwner\|table\(C)\|CF\(C)
+| Endpoint | invoke | superuser\|global(X)\|NS(X)\|TableOwner\|table(X)
 | AccessController | grant(global level) | global(A)
 |                  | grant(namespace level) | global(A)\|NS(A)
-|                  | grant(table level) | global(A)\|NS(A)\|table(A)\|CF(A)\|CQ(A)
+|                  | grant(table level) | global(A)\|NS(A)\|TableOwner\|table(A)\|CF(A)\|CQ(A)
 |                  | revoke(global level) | global(A)
 |                  | revoke(namespace level) | global(A)\|NS(A)
-|                  | revoke(table level) | global(A)\|NS(A)\|table(A)\|CF(A)\|CQ(A)
+|                  | revoke(table level) | global(A)\|NS(A)\|TableOwner\|table(A)\|CF(A)\|CQ(A)
 |                  | getUserPermissions(global level) | global(A)
 |                  | getUserPermissions(namespace level) | global(A)\|NS(A)
-|                  | getUserPermissions(table level) | global(A)\|NS(A)\|table(A)\|CF(A)\|CQ(A)
+|                  | getUserPermissions(table level) | global(A)\|NS(A)\|TableOwner\|table(A)\|CF(A)\|CQ(A)
 | RegionServer | stopRegionServer | superuser\|global(A)
 |              | mergeRegions | superuser\|global(A)
 |              | rollWALWriterRequest | superuser\|global(A)

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/architecture.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index 0236d81..659c4ee 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -2327,6 +2327,20 @@ Instead you can change the number of region replicas per table to increase or de
    The period (in milliseconds) for refreshing the store files for the secondary regions. 0 means this feature is disabled. Secondary regions see new files (from flushes and compactions) from the primary once the secondary region refreshes the list of files in the region. But too-frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than the HFile TTL (hbase.master.hfilecleaner.ttl), the requests are rejected. Configuring the HFile TTL to a larger value is also recommended with this setting.
   </description>
 </property>
+<property>
+  <name>hbase.region.replica.replication.memstore.enabled</name>
+  <value>true</value>
+  <description>
+    If you set this to `false`, replicas do not receive memstore updates from
+    the primary RegionServer. If you set this to `true`, you can still disable
+    memstore replication on a per-table basis, by setting the table's
+    `REGION_MEMSTORE_REPLICATION` configuration property to `false`. If
+    memstore replication is disabled, the secondaries will only receive
+    updates for events like flushes and bulkloads, and will not have access to
+    data which the primary has not yet flushed. This preserves the guarantee
+    of row-level consistency, even when the read requests `Consistency.TIMELINE`.
+  </description>
+</property>
 ----
 
 Also keep in mind that the region replica placement policy is enforced only by the `StochasticLoadBalancer`, which is the default balancer.
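
A minimal sketch of the per-table override described in the new property text, assuming the 1.0+ client API; the table name is hypothetical, and setting the `REGION_MEMSTORE_REPLICATION` key via `HTableDescriptor.setValue` as a plain string attribute is an assumption:

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableMemstoreReplication {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName name = TableName.valueOf("my_table"); // hypothetical table
      HTableDescriptor htd = admin.getTableDescriptor(name);
      // Per-table override of hbase.region.replica.replication.memstore.enabled.
      // Secondaries then see new data only after flushes and bulk loads.
      htd.setValue("REGION_MEMSTORE_REPLICATION", "false");
      admin.modifyTable(name, htd);
    }
  }
}
----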

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/configuration.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index ed00a49..01f2eb7 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -98,6 +98,11 @@ This section lists required services and some required system configuration.
 |JDK 7
 |JDK 8
 
+|1.1
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|yes
+|Running with JDK 8 will work but is not well tested.
+
 |1.0
 |link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes
@@ -205,20 +210,22 @@ Use the following legend to interpret this table:
 * "X" = not supported
 * "NT" = Not tested
 
-[cols="1,1,1,1,1,1", options="header"]
+[cols="1,1,1,1,1,1,1", options="header"]
 |===
-| | HBase-0.92.x | HBase-0.94.x | HBase-0.96.x | HBase-0.98.x (Support for Hadoop 1.1+ is deprecated.) | HBase-1.0.x (Hadoop 1.x is NOT supported)
-|Hadoop-0.20.205 | S | X | X | X | X
-|Hadoop-0.22.x | S | X | X | X | X
-|Hadoop-1.0.x  |X | X | X | X | X
-|Hadoop-1.1.x | NT | S | S | NT | X
-|Hadoop-0.23.x | X | S | NT | X | X
-|Hadoop-2.0.x-alpha | X | NT | X | X | X
-|Hadoop-2.1.0-beta | X | NT | S | X | X
-|Hadoop-2.2.0 | X | NT | S | S | NT
-|Hadoop-2.3.x | X | NT | S | S | NT
-|Hadoop-2.4.x | X | NT | S | S | S
-|Hadoop-2.5.x | X | NT | S | S | S
+| | HBase-0.92.x | HBase-0.94.x | HBase-0.96.x | HBase-0.98.x (Support for Hadoop 1.1+ is deprecated.) | HBase-1.0.x (Hadoop 1.x is NOT supported) | HBase-1.1.x
+|Hadoop-0.20.205 | S | X | X | X | X | X
+|Hadoop-0.22.x | S | X | X | X | X | X
+|Hadoop-1.0.x  |X | X | X | X | X | X
+|Hadoop-1.1.x | NT | S | S | NT | X | X
+|Hadoop-0.23.x | X | S | NT | X | X | X
+|Hadoop-2.0.x-alpha | X | NT | X | X | X | X
+|Hadoop-2.1.0-beta | X | NT | S | X | X | X
+|Hadoop-2.2.0 | X | NT | S | S | NT | NT
+|Hadoop-2.3.x | X | NT | S | S | NT | NT
+|Hadoop-2.4.x | X | NT | S | S | S | S
+|Hadoop-2.5.x | X | NT | S | S | S | S
+|Hadoop-2.6.x | X | NT | NT | NT | S | S
+|Hadoop-2.7.x | X | NT | NT | NT | NT | NT
 |===
 
 .Replace the Hadoop Bundled With HBase!
@@ -994,8 +1001,7 @@ To enable it in 0.99 or above, add below property in _hbase-site.xml_:
 NOTE: DO NOT set `com.sun.management.jmxremote.port` for Java VM at the same time.
 
 Currently it supports Master and RegionServer Java VM.
-The reason why you only configure coprocessor for 'regionserver' is that, starting from HBase 0.99, a Master IS also a RegionServer.
-(See link:https://issues.apache.org/jira/browse/HBASE-10569[HBASE-10569] for more information.) By default, the JMX listens on TCP port 10102, you can further configure the port using below properties:
+By default, JMX listens on TCP port 10102. You can further configure the port using the properties below:
 
 [source,xml]
 ----
@@ -1062,7 +1068,7 @@ Finally start `jconsole` on the client using the key store:
 jconsole -J-Djavax.net.ssl.trustStore=/home/tianq/jconsoleKeyStore
 ----
 
-NOTE: for HBase 0.98, To enable the HBase JMX implementation on Master, you also need to add below property in _hbase-site.xml_: 
+NOTE: To enable the HBase JMX implementation on the Master, you also need to add the following property to _hbase-site.xml_:
 
 [source,xml]
 ----
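
The JMX endpoint configured by these properties can also be exercised programmatically, not just with `jconsole`. A minimal sketch, assuming an unsecured listener on the default port 10102; the host name is hypothetical:

[source,java]
----
import javax.management.MBeanServerConnection;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JmxPing {
  public static void main(String[] args) throws Exception {
    // HBase's JMX connector listens on port 10102 by default.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://master.example.com:10102/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbsc = connector.getMBeanServerConnection();
      System.out.println("MBeans registered: " + mbsc.getMBeanCount());
    }
  }
}
----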

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/developer.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
index 26ba325..ee03614 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -401,6 +401,16 @@ mvn -DskipTests clean install && mvn -DskipTests package assembly:single
 
 The distribution tarball is built in _hbase-assembly/target/hbase-<version>-bin.tar.gz_.
 
+You can install or deploy the tarball by placing the `assembly:single` goal before the `install` or `deploy` goal in the Maven command:
+
+----
+mvn -DskipTests package assembly:single install
+----
+----
+mvn -DskipTests package assembly:single deploy
+----
+
+
 [[build.gotchas]]
 ==== Build Gotchas
 
@@ -626,8 +636,7 @@ Release needs to be tagged for the next step.
 
 . Deploy to the Maven Repository.
 +
-Next, deploy HBase to the Apache Maven repository, using the `apache-release` profile instead of the `release` profile when running the +mvn
-                            deploy+ command.
+Next, deploy HBase to the Apache Maven repository, using the `apache-release` profile instead of the `release` profile when running the `mvn deploy` command.
 This profile invokes the Apache pom referenced by our pom files, and also signs your artifacts published to Maven, as long as the _settings.xml_ is configured correctly, as described in <<mvn.settings.file,mvn.settings.file>>.
 +
 [source,bourne]
@@ -638,6 +647,8 @@ $ mvn deploy -DskipTests -Papache-release
 +
 This command copies all artifacts up to a temporary staging Apache mvn repository in an 'open' state.
 More work needs to be done on these maven artifacts to make them generally available. 
++
+We do not release the HBase tarball to the Apache Maven repository. To avoid deploying the tarball, do not include the `assembly:single` goal in your `mvn deploy` command. Check the deployed artifacts as described in the next section.
 
 . Make the Release Candidate available.
 +

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/getting_started.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc
index 76d793c..41674a0 100644
--- a/src/main/asciidoc/_chapters/getting_started.adoc
+++ b/src/main/asciidoc/_chapters/getting_started.adoc
@@ -619,12 +619,16 @@ For more about ZooKeeper configuration, including using an external ZooKeeper in
 .Web UI Port Changes
 NOTE: Web UI Port Changes
 +
-In HBase newer than 0.98.x, the HTTP ports used by the HBase Web UI changed from 60010 for the Master and 60030 for each RegionServer to 16610 for the Master and 16030 for the RegionServer.
+In HBase newer than 0.98.x, the HTTP ports used by the HBase Web UI changed from 60010 for the
+Master and 60030 for each RegionServer to 16010 for the Master and 16030 for the RegionServer.
 
 +
-If everything is set up correctly, you should be able to connect to the UI for the Master `http://node-a.example.com:16610/` or the secondary master at `http://node-b.example.com:16610/` for the secondary master, using a web browser.
+If everything is set up correctly, you should be able to connect to the UI for the Master
+`http://node-a.example.com:16010/` or the secondary master at `http://node-b.example.com:16010/`
+for the secondary master, using a web browser.
 If you can connect via `localhost` but not from another host, check your firewall rules.
-You can see the web UI for each of the RegionServers at port 16630 of their IP addresses, or by clicking their links in the web UI for the Master.
+You can see the web UI for each of the RegionServers at port 16030 of their IP addresses, or by
+clicking their links in the web UI for the Master.
 
 . Test what happens when nodes or services disappear.
 +

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/hbase_apis.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/hbase_apis.adoc b/src/main/asciidoc/_chapters/hbase_apis.adoc
index 85dbad1..6d2777b 100644
--- a/src/main/asciidoc/_chapters/hbase_apis.adoc
+++ b/src/main/asciidoc/_chapters/hbase_apis.adoc
@@ -36,102 +36,99 @@ See <<external_apis>> for more information.
 
 == Examples
 
-.Create a Table Using Java
+.Create, Modify, and Delete a Table Using Java
 ====
 
 [source,java]
 ----
 package com.example.hbase.admin;
 
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-import org.apache.hadoop.conf.Configuration;
 
-import static com.example.hbase.Constants.*;
+public class Example {
 
-public class CreateSchema {
+  private static final String TABLE_NAME = "MY_TABLE_NAME_TOO";
+  private static final String CF_DEFAULT = "DEFAULT_COLUMN_FAMILY";
 
   public static void createOrOverwrite(Admin admin, HTableDescriptor table) throws IOException {
-    if (admin.tableExists(table.getName())) {
-      admin.disableTable(table.getName());
-      admin.deleteTable(table.getName());
+    if (admin.tableExists(table.getTableName())) {
+      admin.disableTable(table.getTableName());
+      admin.deleteTable(table.getTableName());
     }
     admin.createTable(table);
   }
 
-  public static void createSchemaTables (Configuration config) {
-    try {
-      final Admin admin = new Admin(config);
+  public static void createSchemaTables(Configuration config) throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(config);
+         Admin admin = connection.getAdmin()) {
+
       HTableDescriptor table = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
       table.addFamily(new HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.SNAPPY));
 
       System.out.print("Creating table. ");
       createOrOverwrite(admin, table);
       System.out.println(" Done.");
-
-      admin.close();
-    } catch (Exception e) {
-      e.printStackTrace();
-      System.exit(-1);
     }
   }
 
-}
-----
-====
-
-.Add, Modify, and Delete a Table
-====
-
-[source,java]
-----
-public static void upgradeFrom0 (Configuration config) {
-
-  try {
-    final Admin admin = new Admin(config);
-    TableName tableName = TableName.valueOf(TABLE_ASSETMETA);
-    HTableDescriptor table_assetmeta = new HTableDescriptor(tableName);
-    table_assetmeta.addFamily(new HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.SNAPPY));
+  public static void modifySchema (Configuration config) throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(config);
+         Admin admin = connection.getAdmin()) {
 
-    // Create a new table.
+      TableName tableName = TableName.valueOf(TABLE_NAME);
+      if (!admin.tableExists(tableName)) {
+        System.out.println("Table does not exist.");
+        System.exit(-1);
+      }
 
-    System.out.print("Creating table_assetmeta. ");
-    admin.createTable(table_assetmeta);
-    System.out.println(" Done.");
+      HTableDescriptor table = new HTableDescriptor(tableName);
 
-    // Update existing table
-    HColumnDescriptor newColumn = new HColumnDescriptor("NEWCF");
-    newColumn.setCompactionCompressionType(Algorithm.GZ);
-    newColumn.setMaxVersions(HConstants.ALL_VERSIONS);
-    admin.addColumn(tableName, newColumn);
+      // Update existing table
+      HColumnDescriptor newColumn = new HColumnDescriptor("NEWCF");
+      newColumn.setCompactionCompressionType(Algorithm.GZ);
+      newColumn.setMaxVersions(HConstants.ALL_VERSIONS);
+      admin.addColumn(tableName, newColumn);
 
-    // Update existing column family
-    HColumnDescriptor existingColumn = new HColumnDescriptor(CF_DEFAULT);
-    existingColumn.setCompactionCompressionType(Algorithm.GZ);
-    existingColumn.setMaxVersions(HConstants.ALL_VERSIONS);
-    table_assetmeta.modifyFamily(existingColumn)
-    admin.modifyTable(tableName, table_assetmeta);
+      // Update existing column family
+      HColumnDescriptor existingColumn = new HColumnDescriptor(CF_DEFAULT);
+      existingColumn.setCompactionCompressionType(Algorithm.GZ);
+      existingColumn.setMaxVersions(HConstants.ALL_VERSIONS);
+      table.modifyFamily(existingColumn);
+      admin.modifyTable(tableName, table);
 
-    // Disable an existing table
-    admin.disableTable(tableName);
+      // Disable an existing table
+      admin.disableTable(tableName);
 
-    // Delete an existing column family
-    admin.deleteColumn(tableName, CF_DEFAULT);
+      // Delete an existing column family
+      admin.deleteColumn(tableName, CF_DEFAULT.getBytes("UTF-8"));
 
-    // Delete a table (Need to be disabled first)
-    admin.deleteTable(tableName);
+      // Delete a table (Need to be disabled first)
+      admin.deleteTable(tableName);
+    }
+  }
 
+  public static void main(String... args) throws IOException {
+    Configuration config = HBaseConfiguration.create();
 
-    admin.close();
-  } catch (Exception e) {
-    e.printStackTrace();
-    System.exit(-1);
+    //Add any necessary configuration files (hbase-site.xml, core-site.xml)
+    config.addResource(new Path(System.getenv("HBASE_CONF_DIR"), "hbase-site.xml"));
+    config.addResource(new Path(System.getenv("HADOOP_CONF_DIR"), "core-site.xml"));
+    createSchemaTables(config);
+    modifySchema(config);
   }
 }
 ----
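
Note that the example's `main` method expects the `HBASE_CONF_DIR` and `HADOOP_CONF_DIR` environment variables to be set; point them at the directories containing _hbase-site.xml_ and _core-site.xml_ before running, since the paths handed to `Configuration.addResource` are built from those variables.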

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/mapreduce.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/mapreduce.adoc b/src/main/asciidoc/_chapters/mapreduce.adoc
index a008a4f..2a42af2 100644
--- a/src/main/asciidoc/_chapters/mapreduce.adoc
+++ b/src/main/asciidoc/_chapters/mapreduce.adoc
@@ -51,27 +51,38 @@ In the notes below, we refer to o.a.h.h.mapreduce but replace with the o.a.h.h.m
 
 By default, MapReduce jobs deployed to a MapReduce cluster do not have access to either the HBase configuration under `$HBASE_CONF_DIR` or the HBase classes.
 
-To give the MapReduce jobs the access they need, you could add _hbase-site.xml_ to the _$HADOOP_HOME/conf/_ directory and add the HBase JARs to the _HADOOP_HOME/conf/_ directory, then copy these changes across your cluster.
-You could add _hbase-site.xml_ to _$HADOOP_HOME/conf_ and add HBase jars to the _$HADOOP_HOME/lib_ directory.
-You would then need to copy these changes across your cluster or edit _$HADOOP_HOMEconf/hadoop-env.sh_ and add them to the `HADOOP_CLASSPATH` variable.
+To give the MapReduce jobs the access they need, you could add _hbase-site.xml_ to _$HADOOP_HOME/conf_ and add HBase jars to the _$HADOOP_HOME/lib_ directory.
+You would then need to copy these changes across your cluster, or edit _$HADOOP_HOME/conf/hadoop-env.sh_ and add them to the `HADOOP_CLASSPATH` variable.
 However, this approach is not recommended because it will pollute your Hadoop install with HBase references.
 It also requires you to restart the Hadoop cluster before Hadoop can use the HBase data.
 
+The recommended approach is to let HBase add its dependency jars itself and use `HADOOP_CLASSPATH` or `-libjars`.
+
 Since HBase 0.90.x, HBase adds its dependency JARs to the job configuration itself.
 The dependencies only need to be available on the local `CLASSPATH`.
-The following example runs the bundled HBase link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] MapReduce job against a table named `usertable` If you have not set the environment variables expected in the command (the parts prefixed by a `$` sign and curly braces), you can use the actual system paths instead.
+The following example runs the bundled HBase link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] MapReduce job against a table named `usertable`.
+If you have not set the environment variables expected in the command (the parts prefixed by a `$` sign and surrounded by curly braces), you can use the actual system paths instead.
 Be sure to use the correct version of the HBase JAR for your system.
-The backticks (``` symbols) cause ths shell to execute the sub-commands, setting the `CLASSPATH` as part of the command.
+The backticks (``` symbols) cause the shell to execute the sub-commands, setting `HADOOP_CLASSPATH` to the output of `hbase classpath` (the command that dumps the HBase CLASSPATH).
 This example assumes you use a BASH-compatible shell.
 
 [source,bash]
 ----
-$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar rowcounter usertable
+$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/lib/hbase-server-VERSION.jar rowcounter usertable
 ----
 
 When the command runs, internally, the HBase JAR finds the dependencies it needs for ZooKeeper, Guava, and its other dependencies on the passed `HADOOP_CLASSPATH` and adds the JARs to the MapReduce job configuration.
 See the source at `TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)` for how this is done.
 
+The command `hbase mapredcp` can also help you dump the CLASSPATH entries required by MapReduce, which are the same jars `TableMapReduceUtil#addDependencyJars` would add.
+You can add them, together with the HBase conf directory, to `HADOOP_CLASSPATH`.
+For jobs that do not package their dependencies or call `TableMapReduceUtil#addDependencyJars`, the following command structure is necessary:
+
+[source,bash]
+----
+$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase mapredcp`:${HBASE_HOME}/conf hadoop jar MyApp.jar MyJobMainClass -libjars $(${HBASE_HOME}/bin/hbase mapredcp | tr ':' ',') ...
+----
+
 [NOTE]
 ====
 The example may not work if you are running HBase from its build directory rather than an installed location.
@@ -85,11 +96,11 @@ If this occurs, try modifying the command as follows, so that it uses the HBase
 
 [source,bash]
 ----
-$ HADOOP_CLASSPATH=${HBASE_HOME}/hbase-server/target/hbase-server-VERSION-SNAPSHOT.jar:`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server/target/hbase-server-VERSION-SNAPSHOT.jar rowcounter usertable
+$ HADOOP_CLASSPATH=${HBASE_BUILD_HOME}/hbase-server/target/hbase-server-VERSION-SNAPSHOT.jar:`${HBASE_BUILD_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_BUILD_HOME}/hbase-server/target/hbase-server-VERSION-SNAPSHOT.jar rowcounter usertable
 ----
 ====
 
-.Notice to MapReduce users of HBase 0.96.1 and above
+.Notice to MapReduce users of HBase between 0.96.1 and 0.98.4
 [CAUTION]
 ====
 Some MapReduce jobs that use HBase fail to launch.
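
As a complement to the `hbase mapredcp` discussion above, here is a minimal sketch of a job that relies on `TableMapReduceUtil` to ship its HBase dependencies, assuming the 1.0+ client API; the class name and table name are illustrative:

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class ScanJob {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "usertable-scan");
    job.setJarByClass(ScanJob.class);
    // initTableMapperJob wires up the HBase input format and, by default,
    // calls addDependencyJars(job) so the HBase jars ship with the job.
    TableMapReduceUtil.initTableMapperJob(
        "usertable", new Scan(), IdentityTableMapper.class,
        ImmutableBytesWritable.class, Result.class, job);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
----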

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/asciidoc/_chapters/upgrading.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/upgrading.adoc b/src/main/asciidoc/_chapters/upgrading.adoc
index ab3f154..6b63833 100644
--- a/src/main/asciidoc/_chapters/upgrading.adoc
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -41,7 +41,7 @@ HBase has two versioning schemes, pre-1.0 and post-1.0. Both are detailed below.
 [[hbase.versioning.post10]]
 === Post 1.0 versions
 
-Starting with the 1.0.0 release, HBase uses link:http://semver.org/[Semantic Versioning] for its release versioning. In summary:
+Starting with the 1.0.0 release, HBase is working towards link:http://semver.org/[Semantic Versioning] for its release versioning. In summary:
 
 .Given a version number MAJOR.MINOR.PATCH, increment the:
 * MAJOR version when you make incompatible API changes,
@@ -72,10 +72,12 @@ In addition to the usual API versioning considerations HBase has other compatibi
 .Client API compatibility
 * Allow changing or removing existing client APIs.
 * An API needs to be deprecated for a major version before we will change/remove it.
+* APIs available in a patch version will be available in all later patch versions. However, new APIs may be added which will not be available in earlier patch versions.
 * Example: A user using a newly deprecated API does not need to modify application code with HBase API calls until the next major version.
 
 .Client Binary compatibility
-* Old client code can run unchanged (no recompilation needed) against new jars.
+* Client code written to APIs available in a given patch release can run unchanged (no recompilation needed) against the new jars of later patch versions.
+* Client code written to APIs available in a given patch release might not run against the old jars from an earlier patch version.
 * Example: Old compiled client code will work unchanged with the new jars.
 
 .Server-Side Limited API compatibility (taken from Hadoop)
@@ -93,7 +95,7 @@ In addition to the usual API versioning considerations HBase has other compatibi
 * Web page APIs
 
 .Summary
-* A patch upgrade is a drop-in replacement. Any change that is not Java binary compatible would not be allowed.footnote:[See http://docs.oracle.com/javase/specs/jls/se7/html/jls-13.html.]
+* A patch upgrade is a drop-in replacement. Any change that is not Java binary compatible would not be allowed.footnote:[See http://docs.oracle.com/javase/specs/jls/se7/html/jls-13.html.] Downgrading versions within patch releases may not be compatible.
 
 * A minor upgrade requires no application/client code modification. Ideally it would be a drop-in replacement but client code, coprocessors, filters, etc might have to be recompiled if new jars are used.
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/site/resources/images/hbase_logo_with_orca.png
----------------------------------------------------------------------
diff --git a/src/main/site/resources/images/hbase_logo_with_orca.png b/src/main/site/resources/images/hbase_logo_with_orca.png
new file mode 100644
index 0000000..7ed60e2
Binary files /dev/null and b/src/main/site/resources/images/hbase_logo_with_orca.png differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/site/resources/images/hbase_logo_with_orca.xcf
----------------------------------------------------------------------
diff --git a/src/main/site/resources/images/hbase_logo_with_orca.xcf b/src/main/site/resources/images/hbase_logo_with_orca.xcf
new file mode 100644
index 0000000..8d88da2
Binary files /dev/null and b/src/main/site/resources/images/hbase_logo_with_orca.xcf differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/site/resources/images/jumping-orca_transparent_rotated.xcf
----------------------------------------------------------------------
diff --git a/src/main/site/resources/images/jumping-orca_transparent_rotated.xcf b/src/main/site/resources/images/jumping-orca_transparent_rotated.xcf
new file mode 100644
index 0000000..be9e3d9
Binary files /dev/null and b/src/main/site/resources/images/jumping-orca_transparent_rotated.xcf differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/site/resources/images/region_split_process.png
----------------------------------------------------------------------
diff --git a/src/main/site/resources/images/region_split_process.png b/src/main/site/resources/images/region_split_process.png
new file mode 100644
index 0000000..2717617
Binary files /dev/null and b/src/main/site/resources/images/region_split_process.png differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/site/site.xml
----------------------------------------------------------------------
diff --git a/src/main/site/site.xml b/src/main/site/site.xml
index 81c9315..b7debd3 100644
--- a/src/main/site/site.xml
+++ b/src/main/site/site.xml
@@ -64,29 +64,30 @@
     </menu>
     <menu name="Documentation">
       <item name="Reference Guide" href="book.html" target="_blank" />
-      <item name="Getting Started" href="book.html#quickstart" />
-      <item name="User API" href="apidocs/index.html" />
-      <item name="Developer API" href="devapidocs/index.html" />
+      <item name="Reference Guide (PDF)" href="apache_hbase_reference_guide.pdf" target="_blank" />
+      <item name="Getting Started" href="book.html#quickstart" target="_blank" />
+      <item name="User API" href="apidocs/index.html" target="_blank" />
+      <item name="Developer API" href="devapidocs/index.html" target="_blank" />
       <item name="X-Ref" href="xref/index.html" />
-      <item name="中文参考指南(单页)" href="http://abloz.com/hbase/book.html" />
-      <item name="FAQ" href="book.html#faq" />
-      <item name="Videos/Presentations" href="book.html#other.info" />
-      <item name="Wiki" href="http://wiki.apache.org/hadoop/Hbase" />
-      <item name="ACID Semantics" href="acid-semantics.html" />
-      <item name="Bulk Loads" href="book.html#arch.bulk.load" />
-      <item name="Metrics" href="metrics.html" />
-      <item name="HBase on Windows" href="cygwin.html" />
-      <item name="Cluster replication" href="replication.html" />
+      <item name="中文参考指南(单页)" href="http://abloz.com/hbase/book.html" target="_blank" />
+      <item name="FAQ" href="book.html#faq" target="_blank" />
+      <item name="Videos/Presentations" href="book.html#other.info" target="_blank" />
+      <item name="Wiki" href="http://wiki.apache.org/hadoop/Hbase" target="_blank" />
+      <item name="ACID Semantics" href="acid-semantics.html" target="_blank" />
+      <item name="Bulk Loads" href="book.html#arch.bulk.load" target="_blank" />
+      <item name="Metrics" href="metrics.html" target="_blank" />
+      <item name="HBase on Windows" href="cygwin.html" target="_blank" />
+      <item name="Cluster replication" href="replication.html" target="_blank" />
     </menu>
     <menu name="0.94 Documentation">
-      <item name="API" href="0.94/apidocs/index.html" />
-      <item name="X-Ref" href="0.94/xref/index.html" />
-      <item name="Ref Guide (single-page)" href="0.94/book.html" />
+      <item name="API" href="0.94/apidocs/index.html" target="_blank" />
+      <item name="X-Ref" href="0.94/xref/index.html" target="_blank" />
+      <item name="Ref Guide (single-page)" href="0.94/book.html" target="_blank" />
     </menu>
     <menu name="ASF">
-      <item name="Apache Software Foundation" href="http://www.apache.org/foundation/" />
-      <item name="How Apache Works" href="http://www.apache.org/foundation/how-it-works.html" />
-      <item name="Sponsoring Apache" href="http://www.apache.org/foundation/sponsorship.html" />
+      <item name="Apache Software Foundation" href="http://www.apache.org/foundation/" target="_blank" />
+      <item name="How Apache Works" href="http://www.apache.org/foundation/how-it-works.html" target="_blank" />
+      <item name="Sponsoring Apache" href="http://www.apache.org/foundation/sponsorship.html" target="_blank" />
     </menu>
   </body>
 </project>

http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b0dbee/src/main/site/xdoc/index.xml
----------------------------------------------------------------------
diff --git a/src/main/site/xdoc/index.xml b/src/main/site/xdoc/index.xml
index a40ab4b..d7e1e4e 100644
--- a/src/main/site/xdoc/index.xml
+++ b/src/main/site/xdoc/index.xml
@@ -17,17 +17,21 @@
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
   <properties>
-    <title>Apache HBase&#153; Home</title>
+    <title>Apache HBase&#8482; Home</title>
     <link rel="shortcut icon" href="/images/favicon.ico" />
   </properties>
 
   <body>
-    <section name="Welcome to Apache HBase&#153;">
-        <p><a href="http://www.apache.org/">Apache</a> HBase&#153; is the <a href="http://hadoop.apache.org">Hadoop</a> database, a distributed, scalable, big data store.
+    <section name="Welcome to Apache HBase&#8482;">
+        <p><a href="http://www.apache.org/">Apache</a> HBase&#8482; is the <a href="http://hadoop.apache.org">Hadoop</a> database, a distributed, scalable, big data store.
+    </p>
+    <h4>Download Apache HBase&#8482;</h4>
+    <p>
+    Click <b><a href="http://www.apache.org/dyn/closer.cgi/hbase/">here</a></b> to download Apache HBase&#8482;.
     </p>
     <h4>When Would I Use Apache HBase?</h4>
     <p>
-    Use Apache HBase when you need random, realtime read/write access to your Big Data.
+    Use Apache HBase&#8482; when you need random, realtime read/write access to your Big Data.
     This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware.
 Apache HBase is an open-source, distributed, versioned, non-relational database modeled after Google's <a href="http://research.google.com/archive/bigtable.html">Bigtable: A Distributed Storage System for Structured Data</a> by Chang et al.
  Just as Bigtable leverages the distributed data storage provided by the Google File System, Apache HBase provides Bigtable-like capabilities on top of Hadoop and HDFS.