Posted to user@hbase.apache.org by tgh <gu...@ia.ac.cn> on 2012/12/20 12:09:15 UTC

help! code for hbase 0.90 raises ERROR in hbase 0.94

Meanwhile, I am using HBase 0.90 with Hadoop 1.1.1.

I want to know whether these two versions can be used together,
and how to configure them to work with each other.

I wonder whether there is an ERROR in my configuration.
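(As a first sanity check when pairing the two, here is a minimal sketch that only assumes the hbase and hadoop client jars are on the classpath; it prints the versions those jars actually report, so a mismatch between what is installed and what the client loads shows up immediately.)

----- sketch: version check (illustrative) -----
// Prints the Hadoop and HBase versions resolved from the client classpath.
public class VersionCheck {
    public static void main(String[] args) {
        System.out.println("Hadoop: " + org.apache.hadoop.util.VersionInfo.getVersion());
        System.out.println("HBase:  " + org.apache.hadoop.hbase.util.VersionInfo.getVersion());
    }
}
------------------------------------------------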

----------------- configuration files in hbase/conf -----------------

----- hadoop-metrics.properties -----

# See http://wiki.apache.org/hadoop/GangliaMetrics
# Make sure you know whether you are using ganglia 3.0 or 3.1.
# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
# And, yes, this file is named hadoop-metrics.properties rather than
# hbase-metrics.properties because we're leveraging the hadoop metrics
# package and hadoop-metrics.properties is a hardcoded name, at least
# for the moment.
#
# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
# GMETADHOST_IP is the hostname or IP address of the server on which the ganglia 
# meta daemon (gmetad) service is running

# Configuration of the "hbase" context for NullContextWithUpdateThread
# NullContextWithUpdateThread is a null context which has a thread that runs
# periodically when monitoring is started. This keeps the data sampled
# correctly.
hbase.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
hbase.period=10

# Configuration of the "hbase" context for file
# hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
# hbase.fileName=/tmp/metrics_hbase.log

# HBase-specific configuration to reset long-running stats (e.g. compactions)
# If this variable is left out, then the default is no expiration.
hbase.extendedperiod = 3600

# Configuration of the "hbase" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# hbase.period=10
# hbase.servers=GMETADHOST_IP:8649

# Configuration of the "jvm" context for null
jvm.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
jvm.period=10

# Configuration of the "jvm" context for file
# jvm.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
# jvm.fileName=/tmp/metrics_jvm.log

# Configuration of the "jvm" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# jvm.period=10
# jvm.servers=GMETADHOST_IP:8649

# Configuration of the "rpc" context for null
rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
rpc.period=10

# Configuration of the "rpc" context for file
# rpc.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
# rpc.fileName=/tmp/metrics_rpc.log

# Configuration of the "rpc" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# rpc.period=10
# rpc.servers=GMETADHOST_IP:8649

# Configuration of the "rest" context for ganglia
# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
# rest.class=org.apache.hadoop.metrics.ganglia.GangliaContext
# rest.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
# rest.period=10
# rest.servers=GMETADHOST_IP:8649
#

----- hbase-env.sh -----
#/**
# * Copyright 2007 The Apache Software Foundation
# *
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements.  See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership.  The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License.  You may obtain a copy of the License at
# *
# *     http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */

# Set environment variables here.

# The java implementation to use.  Java 1.6 required.
export JAVA_HOME=/home/tianguanhua1/jdk1.7.0_09

# Extra Java CLASSPATH elements.  Optional.
# export HBASE_CLASSPATH=

# The maximum amount of heap to use, in MB. Default is 1000.
export HBASE_HEAPSIZE=8000

# Extra Java runtime options.
# Below are what we set by default.  May only work with SUN JVM.
# For more on why as well as other possible settings,
# see http://wiki.apache.org/hadoop/PerformanceTuning
export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC"

# Uncomment below to enable java garbage collection logging in the .out file.
# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps $HBASE_GC_OPTS" 

# Uncomment below (along with above GC logging) to put GC information in its own logfile (will set HBASE_GC_OPTS)
# export HBASE_USE_GC_LOGFILE=true


# Uncomment below if you intend to use the EXPERIMENTAL off heap cache.
# export HBASE_OPTS="$HBASE_OPTS -XX:MaxDirectMemorySize="
# Set hbase.offheapcache.percentage in hbase-site.xml to a nonzero value.


# Uncomment and adjust to enable JMX exporting
# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
#
# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"

# File naming hosts on which HRegionServers will run.  $HBASE_HOME/conf/regionservers by default.
# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers

# File naming hosts on which backup HMaster will run.  $HBASE_HOME/conf/backup-masters by default.
# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters

# Extra ssh options.  Empty by default.
# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"

# Where log files are stored.  $HBASE_HOME/logs by default.
# export HBASE_LOG_DIR=${HBASE_HOME}/logs

# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers 
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"

# A string representing this instance of hbase. $USER by default.
# export HBASE_IDENT_STRING=$USER

# The scheduling priority for daemon processes.  See 'man nice'.
# export HBASE_NICENESS=10

# The directory where pid files are stored. /tmp by default.
# export HBASE_PID_DIR=/var/hadoop/pids

# Seconds to sleep between slave commands.  Unset by default.  This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HBASE_SLAVE_SLEEP=0.1

# Tell HBase whether it should manage its own instance of ZooKeeper or not.
export HBASE_MANAGES_ZK=false

----- hbase-policy.xml -----
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->

<configuration>
  <property>
    <name>security.client.protocol.acl</name>
    <value>*</value>
    <description>ACL for HRegionInterface protocol implementations (i.e.
    clients talking to HRegionServers)
    The ACL is a comma-separated list of user and group names. The user and 
    group list is separated by a blank, e.g. "alice,bob users,wheel". 
    A special value of "*" means all users are allowed.</description>
  </property>

  <property>
    <name>security.admin.protocol.acl</name>
    <value>*</value>
    <description>ACL for HMasterInterface protocol implementation (i.e.
    clients talking to HMaster for admin operations).
    The ACL is a comma-separated list of user and group names. The user and 
    group list is separated by a blank, e.g. "alice,bob users,wheel". 
    A special value of "*" means all users are allowed.</description>
  </property>

  <property>
    <name>security.masterregion.protocol.acl</name>
    <value>*</value>
    <description>ACL for HMasterRegionInterface protocol implementations
    (for HRegionServers communicating with HMaster)
    The ACL is a comma-separated list of user and group names. The user and 
    group list is separated by a blank, e.g. "alice,bob users,wheel". 
    A special value of "*" means all users are allowed.</description>
  </property>
</configuration>

----- hbase-site.xml -----
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://blade1:9000/hbase</value>
    <description>The directory shared by RegionServers.</description>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>blade1,blade2,blade4</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/tianguanhua1/zookeeper/data</value>
  </property>
  <property>
    <name>dfs.support.append</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.datanode.max.xcievers</name>
    <value>4096</value>
  </property>
  <property>
    <name>hbase.master</name>
    <value>blade1:60000</value>
  </property>
</configuration>
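(Side note: a minimal sketch, assuming the conf directory holding the hbase-site.xml above is on the client classpath, to confirm a client actually sees these settings; HBaseConfiguration.create() loads hbase-default.xml and then hbase-site.xml.)

----- sketch: verify hbase-site.xml is picked up -----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SiteCheck {
    public static void main(String[] args) {
        // create() reads hbase-default.xml and hbase-site.xml from the classpath
        Configuration conf = HBaseConfiguration.create();
        System.out.println("hbase.rootdir             = " + conf.get("hbase.rootdir"));
        System.out.println("hbase.cluster.distributed = " + conf.get("hbase.cluster.distributed"));
        System.out.println("hbase.zookeeper.quorum    = " + conf.get("hbase.zookeeper.quorum"));
    }
}
------------------------------------------------------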

----- log4j.properties -----
# Define some default values that can be overridden by system properties
hbase.root.logger=INFO,console
hbase.security.logger=INFO,console
hbase.log.dir=.
hbase.log.file=hbase.log

# Define the root logger to the system property "hbase.root.logger".
log4j.rootLogger=${hbase.root.logger}

# Logging Threshold
log4j.threshold=ALL

#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}

# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

#
# Security audit appender
#
hbase.security.log.file=SecurityAuth.audit
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
log4j.appender.DRFAS.File=${hbase.log.dir}/${hbase.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.category.SecurityLogger=${hbase.security.logger}
log4j.additivity.SecurityLogger=false

#
# Null Appender
#
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender

#
# console
# Add "console" to rootlogger above if you want to use this 
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n

# Custom Logging levels

log4j.logger.org.apache.zookeeper=INFO
#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
log4j.logger.org.apache.hadoop.hbase=DEBUG
# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
#log4j.logger.org.apache.hadoop.dfs=DEBUG
# Set this class to log INFO only, otherwise it's OTT

# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG

# Uncomment the below if you want to remove logging of client region caching
# and scan of .META. messages
# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO

----- regionservers -----
blade2
blade4
blade5
blade6
blade7
blade8



-----Original Message-----
From: tgh [mailto:guanhua.tian@ia.ac.cn]
Sent: December 20, 2012 18:17
To: user@hbase.apache.org
Subject: code for hbase 0.90 raises ERROR in hbase 0.94

Hi
	I used HBase 0.90, and now I have switched to HBase 0.94, and my program fails with an ERROR.

	Could you help me?

------------------- my code -----------------------
		Configuration hbase_config = new Configuration();
		hbase_config.set("hbase.zookeeper.quorum", sms_zookper_service);
		HTable table = null;

		while (!m_bStop) {
			if (table == null) {
				try {
					// log.info(String.format("Connect To HBase %s", args));
					// The next line is where the ERROR is thrown:
					table = new HTable(HBaseConfiguration.create(hbase_config), sms_hbase_table);
					log.info("Connect To HBase Completely");
				} catch (Exception e1) {
					e1.printStackTrace();
					SetError();
				}
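(For comparison, a minimal sketch of the usual 0.94-style setup with the names made explicit; "my_table" and the quorum hosts are placeholders. Starting from HBaseConfiguration.create() rather than new Configuration() ensures hbase-site.xml is loaded before any explicit override.)

----- sketch: 0.94-style table setup (placeholder names) -----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectSketch {
    public static void main(String[] args) throws Exception {
        // Loads hbase-default.xml/hbase-site.xml first, then applies the override.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "blade1,blade2,blade4"); // placeholder hosts

        HTable table = new HTable(conf, "my_table"); // "my_table" is a placeholder
        try {
            System.out.println("Connected to " + Bytes.toString(table.getTableName()));
        } finally {
            table.close(); // HTable is Closeable in 0.94
        }
    }
}
--------------------------------------------------------------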

----------------------------- log ERROR -----------------------------
Exception in thread "Thread-1" java.lang.NoClassDefFoundError: org/apache/commons/configuration/Configuration
        at org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.<init>(DefaultMetricsSystem.java:37)
        at org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.<clinit>(DefaultMetricsSystem.java:34)
        at org.apache.hadoop.security.UgiInstrumentation.create(UgiInstrumentation.java:51)
        at org.apache.hadoop.security.UserGroupInformation.initialize(UserGroupInformation.java:216)
        at org.apache.hadoop.security.UserGroupInformation.ensureInitialized(UserGroupInformation.java:184)
        at org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled(UserGroupInformation.java:236)
        at org.apache.hadoop.security.UserGroupInformation.getLoginUser(UserGroupInformation.java:477)
        at org.apache.hadoop.security.UserGroupInformation.getCurrentUser(UserGroupInformation.java:463)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:601)
        at org.apache.hadoop.hbase.util.Methods.call(Methods.java:37)
        at org.apache.hadoop.hbase.security.User.call(User.java:586)
        at org.apache.hadoop.hbase.security.User.callStatic(User.java:576)
        at org.apache.hadoop.hbase.security.User.access$400(User.java:50)
        at org.apache.hadoop.hbase.security.User$SecureHadoopUser.<init>(User.java:393)
        at org.apache.hadoop.hbase.security.User$SecureHadoopUser.<init>(User.java:388)
        at org.apache.hadoop.hbase.security.User.getCurrent(User.java:139)
        at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionKey.<init>(HConnectionManager.java:412)
        at org.apache.hadoop.hbase.client.HConnectionManager.getConnection(HConnectionManager.java:179)
        at org.apache.hadoop.hbase.client.HTable.<init>(HTable.java:155)
        at org.apache.hadoop.hbase.client.HTable.<init>(HTable.java:133)
        at com.pattek.sms.opr.store.SmsStore.run(SmsStore.java:266)
        at java.lang.Thread.run(Thread.java:722)
Caused by: java.lang.ClassNotFoundException: org.apache.commons.configuration.Configuration
        at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
        at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
        at java.security.AccessController.doPrivileged(Native Method)
        at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:423)
        at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:356)
        ... 25 more
Dec 20, 2012 6:09:01 PM com
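(The trace above dies on org.apache.commons.configuration.Configuration. That class is neither an HBase nor a Hadoop class; it comes from the commons-configuration jar, which Hadoop 1.x depends on and ships in its lib directory. If that jar is missing from the client classpath, constructing an HTable against 0.94 fails exactly as shown. A minimal probe for it, assuming only the class name taken from the trace:)

----- sketch: probe for the missing class -----
public class MissingClassProbe {
    public static void main(String[] args) {
        String name = "org.apache.commons.configuration.Configuration";
        try {
            Class<?> c = Class.forName(name);
            java.security.CodeSource src = c.getProtectionDomain().getCodeSource();
            // Report which jar the class was actually loaded from.
            System.out.println(name + " loaded from "
                + (src == null ? "the bootstrap classloader" : src.getLocation()));
        } catch (ClassNotFoundException e) {
            System.out.println(name + " is NOT on the classpath -- add"
                + " commons-configuration-*.jar (shipped in hadoop's lib directory)");
        }
    }
}
-----------------------------------------------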