Posted to commits@hbase.apache.org by nd...@apache.org on 2019/11/18 16:58:18 UTC

[hbase] branch master updated: HBASE-23289 Update links to Hadoop wiki in book and code

This is an automated email from the ASF dual-hosted git repository.

ndimiduk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new 8bfdfe1  HBASE-23289 Update links to Hadoop wiki in book and code
8bfdfe1 is described below

commit 8bfdfe1b8522b4aa01de2cc988f3cee6d5024df6
Author: Mingliang Liu <li...@apache.org>
AuthorDate: Sat Nov 16 22:53:04 2019 -0800

    HBASE-23289 Update links to Hadoop wiki in book and code
    
    Signed-off-by: Nick Dimiduk <nd...@apache.org>
---
 .../src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java    | 4 ++--
 .../apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java   | 6 ++----
 .../src/main/java/org/apache/hadoop/hbase/mapred/package-info.java  | 2 +-
 .../main/java/org/apache/hadoop/hbase/mapreduce/package-info.java   | 2 +-
 .../org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java     | 2 +-
 src/main/asciidoc/_chapters/configuration.adoc                      | 4 ++--
 src/main/asciidoc/_chapters/cp.adoc                                 | 2 +-
 src/main/asciidoc/_chapters/faq.adoc                                | 2 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc                            | 2 +-
 src/main/asciidoc/_chapters/troubleshooting.adoc                    | 2 +-
 src/main/asciidoc/_chapters/zookeeper.adoc                          | 2 +-
 src/site/asciidoc/metrics.adoc                                      | 2 +-
 src/site/site.xml                                                   | 2 +-
 src/site/xdoc/metrics.xml                                           | 2 +-
 src/site/xdoc/old_news.xml                                          | 2 +-
 15 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index a203936..c3919df 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -321,7 +321,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
   /**
    * Compression types supported in hbase.
    * LZO is not bundled as part of the hbase distribution.
-   * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
+   * See <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a>
    * for how to enable it.
    * @param value Compression type setting.
    * @return this (for chained invocation)
@@ -371,7 +371,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
   /**
    * Compression types supported in hbase.
    * LZO is not bundled as part of the hbase distribution.
-   * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
+   * See <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a>
    * for how to enable it.
    * @param value Compression type setting.
    * @return this (for chained invocation)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index da7bdda..0b78c9d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -784,8 +784,7 @@ public class ColumnFamilyDescriptorBuilder {
     /**
      * Compression types supported in hbase. LZO is not bundled as part of the
      * hbase distribution. See
-     * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
-     * Compression</a>
+     * <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a>
      * for how to enable it.
      *
      * @param type Compression type setting.
@@ -837,8 +836,7 @@ public class ColumnFamilyDescriptorBuilder {
     /**
      * Compression types supported in hbase. LZO is not bundled as part of the
      * hbase distribution. See
-     * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
-     * Compression</a>
+     * <a href="http://hbase.apache.org/book.html#lzo.compression">LZO Compression</a>
      * for how to enable it.
      *
      * @param type Compression type setting.
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
index 1da3a52..b375b39 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 /**
-Provides HBase <a href="http://wiki.apache.org/hadoop/HadoopMapReduce">MapReduce</a>
+Provides HBase <a href="https://cwiki.apache.org/confluence/display/HADOOP2/HadoopMapReduce">MapReduce</a>
 Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 <p>See <a href="http://hbase.apache.org/book.html#mapreduce">HBase and MapReduce</a>
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
index b1f15ba..29b6309 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
@@ -17,7 +17,7 @@
  * limitations under the License.
  */
 /**
-Provides HBase <a href="http://wiki.apache.org/hadoop/HadoopMapReduce">MapReduce</a>
+Provides HBase <a href="https://cwiki.apache.org/confluence/display/HADOOP2/HadoopMapReduce">MapReduce</a>
 Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 <p>See <a href="http://hbase.apache.org/book.html#mapreduce">HBase and MapReduce</a>
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index b56ab97..8fe18b1 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
@@ -67,7 +67,7 @@ import org.slf4j.LoggerFactory;
  * the create it will do a getChildren("/") and see "x-222-1", "x-542-30",
  * "x-352-109", x-333-110". The process will know that the original create
  * succeeded an the znode it created is "x-352-109".
- * @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling"
+ * @see "https://cwiki.apache.org/confluence/display/HADOOP2/ZooKeeper+ErrorHandling"
  */
 @InterfaceAudience.Private
 public class RecoverableZooKeeper {
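
The javadoc quoted in the hunk above describes the retry-safe sequential-create pattern only in prose. Below is a minimal sketch of that pattern against the plain ZooKeeper client; it is not the RecoverableZooKeeper implementation, and the class and method names (SequentialCreateSketch, createSequential) are hypothetical, used only to illustrate how getChildren() disambiguates a create that may or may not have reached the server.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// A minimal sketch of the retry-safe sequential-create pattern described in the
// javadoc above. This is not the RecoverableZooKeeper implementation; the class
// and method names are hypothetical and exist only for illustration.
public class SequentialCreateSketch {

  /**
   * Creates a sequential znode under the given parent, surviving connection loss.
   * The prefix must be unique to this caller (for example "x-" + sessionId + "-")
   * so that a create which succeeded but was never acknowledged can be found again.
   */
  public static String createSequential(ZooKeeper zk, String parent, String prefix,
      byte[] data) throws KeeperException, InterruptedException {
    while (true) {
      try {
        return zk.create(parent + "/" + prefix, data,
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
      } catch (KeeperException.ConnectionLossException e) {
        // The create may or may not have reached the server. List the parent's
        // children and look for this caller's unique prefix before retrying.
        for (String child : zk.getChildren(parent, false)) {
          if (child.startsWith(prefix)) {
            return parent + "/" + child; // the earlier create did succeed
          }
        }
        // No matching child: the create never happened, so retrying is safe.
      }
    }
  }
}

Production code would additionally bound the number of retries and pause between attempts rather than looping indefinitely.
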
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index c93d2e4..3e91dcb 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -43,7 +43,7 @@ _backup-masters_::
 
 _hadoop-metrics2-hbase.properties_::
   Used to connect HBase to Hadoop's Metrics2 framework.
-  See the link:https://wiki.apache.org/hadoop/HADOOP-6728-MetricsV2[Hadoop Wiki entry] for more information on Metrics2.
+  See the link:https://cwiki.apache.org/confluence/display/HADOOP2/HADOOP-6728-MetricsV2[Hadoop Wiki entry] for more information on Metrics2.
   Contains only commented-out examples by default.
 
 _hbase-env.cmd_ and _hbase-env.sh_::
@@ -143,7 +143,7 @@ NOTE: You must set `JAVA_HOME` on each node of your cluster. _hbase-env.sh_ prov
 [[os]]
 .Operating System Utilities
 ssh::
-  HBase uses the Secure Shell (ssh) command and utilities extensively to communicate between cluster nodes. Each server in the cluster must be running `ssh` so that the Hadoop and HBase daemons can be managed. You must be able to connect to all nodes via SSH, including the local node, from the Master as well as any backup Master, using a shared key rather than a password. You can see the basic methodology for such a set-up in Linux or Unix systems at "<<passwordless.ssh.quickstart>>". If [...]
+  HBase uses the Secure Shell (ssh) command and utilities extensively to communicate between cluster nodes. Each server in the cluster must be running `ssh` so that the Hadoop and HBase daemons can be managed. You must be able to connect to all nodes via SSH, including the local node, from the Master as well as any backup Master, using a shared key rather than a password. You can see the basic methodology for such a set-up in Linux or Unix systems at "<<passwordless.ssh.quickstart>>". If [...]
 
 DNS::
   HBase uses the local hostname to self-report its IP address.
diff --git a/src/main/asciidoc/_chapters/cp.adoc b/src/main/asciidoc/_chapters/cp.adoc
index a600569..faf84c3 100644
--- a/src/main/asciidoc/_chapters/cp.adoc
+++ b/src/main/asciidoc/_chapters/cp.adoc
@@ -139,7 +139,7 @@ Referential Integrity::
 
 Secondary Indexes::
   You can use a coprocessor to maintain secondary indexes. For more information, see
-  link:https://wiki.apache.org/hadoop/Hbase/SecondaryIndexing[SecondaryIndexing].
+  link:https://cwiki.apache.org/confluence/display/HADOOP2/Hbase+SecondaryIndexing[SecondaryIndexing].
 
 
 ==== Types of Observer Coprocessor
diff --git a/src/main/asciidoc/_chapters/faq.adoc b/src/main/asciidoc/_chapters/faq.adoc
index 0e498ac..5b742ce 100644
--- a/src/main/asciidoc/_chapters/faq.adoc
+++ b/src/main/asciidoc/_chapters/faq.adoc
@@ -33,7 +33,7 @@ When should I use HBase?::
   See <<arch.overview>> in the Architecture chapter.
 
 Are there other HBase FAQs?::
-  See the FAQ that is up on the wiki, link:https://wiki.apache.org/hadoop/Hbase/FAQ[HBase Wiki FAQ].
+  See the FAQ that is up on the wiki, link:https://cwiki.apache.org/confluence/display/HADOOP2/Hbase+FAQ[HBase Wiki FAQ].
 
 Does HBase support SQL?::
   Not really. SQL-ish support for HBase via link:https://hive.apache.org/[Hive] is in development, however Hive is based on MapReduce which is not generally suitable for low-latency requests. See the <<datamodel>> section for examples on the HBase client.
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index 616b020..d013f51 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1438,7 +1438,7 @@ But usually disks do the "John Wayne" -- i.e.
 take a while to go down spewing errors in _dmesg_ -- or for some reason, run much slower than their companions.
 In this case you want to decommission the disk.
 You have two options.
-You can link:https://wiki.apache.org/hadoop/FAQ#I_want_to_make_a_large_cluster_smaller_by_taking_out_a_bunch_of_nodes_simultaneously._How_can_this_be_done.3F[decommission
+You can link:https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDataNodeAdminGuide.html[decommission
             the datanode] or, less disruptive in that only the bad disks data will be rereplicated, can stop the datanode, unmount the bad volume (You can't umount a volume while the datanode is using it), and then restart the datanode (presuming you have set dfs.datanode.failed.volumes.tolerated > 0). The regionserver will throw some errors in its logs as it recalibrates where to get its data from -- it will likely roll its WAL log too -- but in general but for some latency spikes, it s [...]
 
 .Short Circuit Reads
diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc b/src/main/asciidoc/_chapters/troubleshooting.adoc
index 9fc7c35..d5161c1 100644
--- a/src/main/asciidoc/_chapters/troubleshooting.adoc
+++ b/src/main/asciidoc/_chapters/troubleshooting.adoc
@@ -1288,7 +1288,7 @@ If you have a DNS server, you can set `hbase.zookeeper.dns.interface` and `hbase
 
 ZooKeeper is the cluster's "canary in the mineshaft". It'll be the first to notice issues if any, so making sure it's happy is the short-cut to a humming cluster.
 
-See the link:https://wiki.apache.org/hadoop/ZooKeeper/Troubleshooting[ZooKeeper Operating Environment Troubleshooting] page.
+See the link:https://cwiki.apache.org/confluence/display/HADOOP2/ZooKeeper+Troubleshooting[ZooKeeper Operating Environment Troubleshooting] page.
 It has suggestions and tools for checking disk and networking performance; i.e.
 the operating environment your ZooKeeper and HBase are running in.
 
diff --git a/src/main/asciidoc/_chapters/zookeeper.adoc b/src/main/asciidoc/_chapters/zookeeper.adoc
index 0123927..98fc498 100644
--- a/src/main/asciidoc/_chapters/zookeeper.adoc
+++ b/src/main/asciidoc/_chapters/zookeeper.adoc
@@ -137,7 +137,7 @@ Just make sure to set `HBASE_MANAGES_ZK` to `false`      if you want it to stay
 
 For more information about running a distinct ZooKeeper cluster, see the ZooKeeper link:https://zookeeper.apache.org/doc/current/zookeeperStarted.html[Getting
         Started Guide].
-Additionally, see the link:https://wiki.apache.org/hadoop/ZooKeeper/FAQ#A7[ZooKeeper Wiki] or the link:https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup[ZooKeeper
+Additionally, see the link:https://cwiki.apache.org/confluence/display/HADOOP2/ZooKeeper+FAQ#ZooKeeperFAQ-7[ZooKeeper Wiki] or the link:https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup[ZooKeeper
         documentation] for more information on ZooKeeper sizing.
 
 [[zk.sasl.auth]]
diff --git a/src/site/asciidoc/metrics.adoc b/src/site/asciidoc/metrics.adoc
index e44db4c..146b7e1 100644
--- a/src/site/asciidoc/metrics.adoc
+++ b/src/site/asciidoc/metrics.adoc
@@ -26,7 +26,7 @@ Apache HBase (TM) emits Hadoop link:https://hadoop.apache.org/core/docs/stable/a
 
 First read up on Hadoop link:https://hadoop.apache.org/core/docs/stable/api/org/apache/hadoop/metrics/package-summary.html[metrics].
 
-If you are using ganglia, the link:https://wiki.apache.org/hadoop/GangliaMetrics[GangliaMetrics] wiki page is useful read.
+If you are using ganglia, the link:https://cwiki.apache.org/confluence/display/HADOOP2/GangliaMetrics[GangliaMetrics] wiki page is a useful read.
 
 To have HBase emit metrics, edit `$HBASE_HOME/conf/hadoop-metrics.properties` and enable metric 'contexts' per plugin.  As of this writing, hadoop supports *file* and *ganglia* plugins. Yes, the hbase metrics file is named hadoop-metrics rather than _hbase-metrics_ because currently at least the hadoop metrics system has the properties filename hardcoded. Per metrics _context_, comment out the NullContext and enable one or more plugins instead.
 
diff --git a/src/site/site.xml b/src/site/site.xml
index f616718..e0f6191 100644
--- a/src/site/site.xml
+++ b/src/site/site.xml
@@ -114,7 +114,7 @@
       <item name="中文参考指南(单页)" href="http://abloz.com/hbase/book.html" target="_blank" />
       <item name="FAQ" href="book.html#faq" target="_blank" />
       <item name="Videos/Presentations" href="book.html#other.info" target="_blank" />
-      <item name="Wiki" href="http://wiki.apache.org/hadoop/Hbase" target="_blank" />
+      <item name="Wiki" href="https://cwiki.apache.org/confluence/display/HADOOP2/Hbase" target="_blank" />
       <item name="ACID Semantics" href="acid-semantics.html" target="_blank" />
       <item name="Bulk Loads" href="book.html#arch.bulk.load" target="_blank" />
       <item name="Metrics" href="metrics.html" target="_blank" />
diff --git a/src/site/xdoc/metrics.xml b/src/site/xdoc/metrics.xml
index 620c14b..d4973da 100644
--- a/src/site/xdoc/metrics.xml
+++ b/src/site/xdoc/metrics.xml
@@ -34,7 +34,7 @@ under the License.
       </section>
       <section name="Setup">
       <p>First read up on Hadoop <a href="http://hadoop.apache.org/core/docs/stable/api/org/apache/hadoop/metrics/package-summary.html">metrics</a>.
-      If you are using ganglia, the <a href="http://wiki.apache.org/hadoop/GangliaMetrics">GangliaMetrics</a>
+      If you are using ganglia, the <a href="https://cwiki.apache.org/confluence/display/HADOOP2/GangliaMetrics">GangliaMetrics</a>
       wiki page is a useful read.</p>
       <p>To have HBase emit metrics, edit <code>$HBASE_HOME/conf/hadoop-metrics.properties</code>
       and enable metric 'contexts' per plugin.  As of this writing, hadoop supports
diff --git a/src/site/xdoc/old_news.xml b/src/site/xdoc/old_news.xml
index 4543979..c43fcbe 100644
--- a/src/site/xdoc/old_news.xml
+++ b/src/site/xdoc/old_news.xml
@@ -102,7 +102,7 @@ under the License.
       The Apache Foundation will be celebrating its 10th anniversary in beautiful Oakland by the Bay. Lots of good talks and meetups including an HBase presentation by a couple of the lads.</p>
       <p>HBase at Hadoop World in NYC: October 2nd, 2009: A few of us will be talking on Practical HBase out east at <a href="http://www.cloudera.com/hadoop-world-nyc">Hadoop World: NYC</a>.</p>
       <p>HUG7 and HBase Hackathon: August 7th-9th, 2009 at StumbleUpon in SF: Sign up for the <a href="http://www.meetup.com/hbaseusergroup/calendar/10950511/">HBase User Group Meeting, HUG7</a> or for the <a href="http://www.meetup.com/hackathon/calendar/10951718/">Hackathon</a> or for both (all are welcome!).</p>
-      <p>June, 2009 -- HBase at HadoopSummit2009 and at NOSQL: See the <a href="http://wiki.apache.org/hadoop/HBase/HBasePresentations">presentations</a></p>
+      <p>June, 2009 -- HBase at HadoopSummit2009 and at NOSQL: See the <a href="https://cwiki.apache.org/confluence/display/HADOOP2/HBase+HBasePresentations">presentations</a></p>
       <p>March 3rd, 2009 -- HUG6: <a href="http://www.meetup.com/hbaseusergroup/calendar/9764004/">HBase User Group 6</a></p>
       <p>January 30th, 2009 -- LA Hbackathon:<a href="http://www.meetup.com/hbasela/calendar/9450876/">HBase January Hackathon Los Angeles</a> at <a href="http://streamy.com" >Streamy</a> in Manhattan Beach</p>
   </section>