Posted to commits@hbase.apache.org by ns...@apache.org on 2011/10/11 04:01:00 UTC

svn commit: r1181350 - in /hbase/branches/0.89/conf: hadoop-metrics.properties hbase-site.xml

Author: nspiegelberg
Date: Tue Oct 11 02:01:00 2011
New Revision: 1181350

URL: http://svn.apache.org/viewvc?rev=1181350&view=rev
Log:
Revert unintended conf changes from r8089

Modified:
    hbase/branches/0.89/conf/hadoop-metrics.properties
    hbase/branches/0.89/conf/hbase-site.xml

Modified: hbase/branches/0.89/conf/hadoop-metrics.properties
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/conf/hadoop-metrics.properties?rev=1181350&r1=1181349&r2=1181350&view=diff
==============================================================================
--- hbase/branches/0.89/conf/hadoop-metrics.properties (original)
+++ hbase/branches/0.89/conf/hadoop-metrics.properties Tue Oct 11 02:01:00 2011
@@ -1,29 +1,54 @@
-# DEFAULT METRICS CONFIGURATION
-# - Configuration used in production tiers. Allows only JMX stats collection
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# Configuration of the "hbase" context for null
+hbase.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "hbase" context for file
+# hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# hbase.period=10
+# hbase.fileName=/tmp/metrics_hbase.log
+
+# Configuration of the "hbase" context for ganglia
+# Pick one of the lines below: Ganglia 3.0 (first) or Ganglia 3.1 (second)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# hbase.period=10
+# hbase.servers=GMETADHOST_IP:8649
 
-# Configuration of the "dfs" context
-dfs.class=org.apache.hadoop.metrics.jmx.JMXContext
-dfs.period=10
-
-# Configuration of the "mapred" context
-mapred.class=org.apache.hadoop.metrics.jmx.JMXContext
-mapred.period=10
-jmx_records=jobtracker,tasktracker
-
-# Configuration of the "jvm" context
+# Configuration of the "jvm" context for null
 jvm.class=org.apache.hadoop.metrics.spi.NullContext
 
-
-###########################################################
-###########################################################
-
-# Beta tiers may want to use CompositeContext to enable both JMX + File export
-# Example configuration given below...
-
-#dfs.class=org.apache.hadoop.metrics.spi.CompositeContext
-#dfs.arity=2
-#dfs.sub1.class=org.apache.hadoop.metrics.file.FileContext
-#dfs.sub1.period=10
-#dfs.sub1.fileName=/usr/local/hadoop/logs/DFS1/dfs_metrics.log
-#dfs.sub2.class=org.apache.hadoop.metrics.jmx.JMXContext
-#dfs.sub2.period=10
+# Configuration of the "jvm" context for file
+# jvm.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# jvm.period=10
+# jvm.fileName=/tmp/metrics_jvm.log
+
+# Configuration of the "jvm" context for ganglia
+# Pick one of the lines below: Ganglia 3.0 (first) or Ganglia 3.1 (second)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=GMETADHOST_IP:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+# rpc.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
+# rpc.period=10
+# rpc.fileName=/tmp/metrics_rpc.log
+
+# Configuration of the "rpc" context for ganglia
+# Pick one of the lines below: Ganglia 3.0 (first) or Ganglia 3.1 (second)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=GMETADHOST_IP:8649
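
For reference, this is roughly what the restored stock file looks like once
the Ganglia 3.1 context is actually switched on; the gmetad host and port
below are illustrative placeholders, not values from this commit:

hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
hbase.period=10
hbase.servers=gmetad.example.com:8649
jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
jvm.period=10
jvm.servers=gmetad.example.com:8649
rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
rpc.period=10
rpc.servers=gmetad.example.com:8649

Note the class tracks the Ganglia wire format: GangliaContext for 3.0,
GangliaContext31 (plus HADOOP-4675 on the Hadoop side) for 3.1.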

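The beta-tier CompositeContext example removed above is also worth a second
look. Uncommented, with the file name scoped to sub1, the dual JMX + file
export reads as below; the log path comes from the removed text, and the
jmx.JMXContext class is internal to this branch rather than stock Hadoop:

dfs.class=org.apache.hadoop.metrics.spi.CompositeContext
dfs.arity=2
dfs.sub1.class=org.apache.hadoop.metrics.file.FileContext
dfs.sub1.period=10
dfs.sub1.fileName=/usr/local/hadoop/logs/DFS1/dfs_metrics.log
dfs.sub2.class=org.apache.hadoop.metrics.jmx.JMXContext
dfs.sub2.period=10
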
Modified: hbase/branches/0.89/conf/hbase-site.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/conf/hbase-site.xml?rev=1181350&r1=1181349&r2=1181350&view=diff
==============================================================================
--- hbase/branches/0.89/conf/hbase-site.xml (original)
+++ hbase/branches/0.89/conf/hbase-site.xml Tue Oct 11 02:01:00 2011
@@ -22,66 +22,4 @@
  */
 -->
 <configuration>
-
-<!-- NEEDED WHETHER OR NOT YOU ARE RUNNING OVER HDFS -->
-<property>
-  <name>hbase.cluster.distributed</name>
-  <value>true</value>
-  <description>For pseudo-distributed, you want to set this to true.
-  false means that HBase tries to put Master + RegionServers in one process.
-  Pseudo-distributed = separate processes/pids</description>
-</property>
-<property>
-  <name>hbase.regionserver.hlog.replication</name>
-  <value>1</value>
-  <description>For HBase to offer good data durability, we roll logs if
-  filesystem replication falls below a certain amount.  In pseudo-distributed
-  mode, you normally only have the local filesystem or 1 HDFS DataNode, so you
-  don't want to roll logs constantly.</description>
-</property>
-<property>
-  <name>hbase.tmp.dir</name>
-  <value>/tmp/hbase-testing</value>
-  <description>Temporary directory on the local filesystem.</description>
-</property>
-<property>
-  <name>hbase.regionserver.port</name>
-  <value>50020</value>
-  <description>The port the RegionServer binds to.</description>
-</property>
-<property>
-  <name>hbase.regionserver.info.port</name>
-  <value>50030</value>
-  <description>The port for the RegionServer web UI.</description>
-</property>
-
-<!-- DEFAULT = use local filesystem, not HDFS
-     ADD THESE LINES if you have a copy of HDFS source and want to run HBase
-     pseudo-distributed over a pseudo-distributed HDFS cluster.
-     For HDFS pseudo-distributed setup, see their documentation:
-
-     http://hadoop.apache.org/common/docs/r0.20.2/quickstart.html#PseudoDistributed
-
-
-<property>
-  <name>hbase.rootdir</name>
-  <value>hdfs://localhost:9000/hbase-testing</value>
-  <description>The directory shared by region servers.
-  Should be fully-qualified to include the filesystem to use.
-  E.g., hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
-  </description>
-</property>
--->
-
-<!-- OPTIONAL: You might want to add these options depending upon your use case
-
-
-<property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>Allow append support (if you want to test data durability with HDFS)
-  </description>
-</property>
--->
-
-
 </configuration>
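
For anyone re-creating the pseudo-distributed setup this revert strips out
of hbase-site.xml, a minimal sketch of the core overrides follows; the
property names and values are drawn from the removed text above, with the
HDFS URL an illustrative placeholder rather than a setting mandated by the
branch:

<configuration>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
    <description>Run the Master and RegionServers as separate
    processes rather than in a single JVM.</description>
  </property>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://localhost:9000/hbase-testing</value>
    <description>Directory shared by region servers; must be
    fully-qualified with the filesystem scheme.</description>
  </property>
  <property>
    <name>hbase.regionserver.hlog.replication</name>
    <value>1</value>
    <description>With a single local DataNode, keep this at 1 so
    HBase does not roll logs constantly.</description>
  </property>
</configuration>

Anything beyond these three (ports, tmp dirs, dfs.support.append) is
optional tuning per the removed comments.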