You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ji...@apache.org on 2008/07/30 03:45:45 UTC
svn commit: r680902 [1/2] - in /hadoop/hbase/trunk: CHANGES.txt
conf/hbase-default.xml src/java/org/apache/hadoop/hbase/HConstants.java
src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
Author: jimk
Date: Tue Jul 29 18:45:44 2008
New Revision: 680902
URL: http://svn.apache.org/viewvc?rev=680902&view=rev
Log:
HBASE-511 Do exponential backoff in clients on NSRE, WRE, ISE, etc.
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/conf/hbase-default.xml
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=680902&r1=680901&r2=680902&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Jul 29 18:45:44 2008
@@ -344,6 +344,7 @@
Cryans via JimK)
HBASE-733 Enhance Cell so that it can contain multiple values at multiple
timestamps
+ HBASE-511 Do exponential backoff in clients on NSRE, WRE, ISE, etc.
OPTIMIZATIONS
HBASE-430 Performance: Scanners and getRow return maps with duplicate data
Modified: hadoop/hbase/trunk/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/conf/hbase-default.xml?rev=680902&r1=680901&r2=680902&view=diff
==============================================================================
--- hadoop/hbase/trunk/conf/hbase-default.xml (original)
+++ hadoop/hbase/trunk/conf/hbase-default.xml Tue Jul 29 18:45:44 2008
@@ -1,309 +1,309 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
- <property>
- <name>hbase.master</name>
- <value>local</value>
- <description>The host and port that the HBase master runs at.
- A value of 'local' runs the master and a regionserver in
- a single process.
- </description>
- </property>
- <property>
- <name>hbase.rootdir</name>
- <value>file:///tmp/hbase-${user.name}/hbase</value>
- <description>The directory shared by region servers.
- Should be fully-qualified to include the filesystem to use.
- E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
- </description>
- </property>
- <property>
- <name>hbase.master.info.port</name>
- <value>60010</value>
- <description>The port for the hbase master web UI
- Set to -1 if you do not want the info server to run.
- </description>
- </property>
- <property>
- <name>hbase.master.info.bindAddress</name>
- <value>0.0.0.0</value>
- <description>The address for the hbase master web UI
- </description>
- </property>
- <property>
- <name>hbase.regionserver</name>
- <value>0.0.0.0:60020</value>
- <description>The host and port a HBase region server runs at.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.dns.interface</name>
- <value>default</value>
- <description>Name of the network interface which a regionserver
- should use to determine it's "real" IP address. This lookup
- prevents strings like "localhost" and "127.0.0.1" from being
- reported back to the master.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.dns.interface</name>
- <value>default</value>
- <description>Name of the network interface which a regionserver
- should use to determine it's "real" IP address. This lookup
- prevents strings like "localhost" and "127.0.0.1" from being
- reported back to the master.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.dns.interface</name>
- <value>default</value>
- <description>Name of the network interface which a regionserver
- should use to determine it's "real" IP address. This lookup
- prevents strings like "localhost" and "127.0.0.1" from being
- reported back to the master.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.info.port</name>
- <value>60030</value>
- <description>The port for the hbase regionserver web UI
- Set to -1 if you do not want the info server to run.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.info.bindAddress</name>
- <value>0.0.0.0</value>
- <description>The address for the hbase regionserver web UI
- </description>
- </property>
- <property>
- <name>hbase.regionserver.class</name>
- <value>org.apache.hadoop.hbase.ipc.HRegionInterface</value>
- <description>An interface that is assignable to HRegionInterface. Used in HClient for
- opening proxy to remote region server.
- </description>
- </property>
- <property>
- <name>hbase.client.pause</name>
- <value>10000</value>
- <description>General client pause value. Used mostly as value to wait
- before running a retry of a failed get, region lookup, etc.</description>
- </property>
- <property>
- <name>hbase.client.retries.number</name>
- <value>5</value>
- <description>Maximum retries. Used as maximum for all retryable
- operations such as fetching of the root region from root region
- server, getting a cell's value, starting a row update, etc.
- Default: 5.
- </description>
- </property>
- <property>
- <name>hbase.master.meta.thread.rescanfrequency</name>
- <value>60000</value>
- <description>How long the HMaster sleeps (in milliseconds) between scans of
- the root and meta tables.
- </description>
- </property>
- <property>
- <name>hbase.master.lease.period</name>
- <value>120000</value>
- <description>HMaster server lease period in milliseconds. Default is
- 60 seconds. Region servers must report in within this period else
- they are considered dead. On loaded cluster, may need to up this
- period.</description>
- </property>
- <property>
- <name>hbase.hbasemaster.maxregionopen</name>
- <value>120000</value>
- <description>Period to wait for a region open. If regionserver
- takes longer than this interval, assign to a new regionserver.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.lease.period</name>
- <value>30000</value>
- <description>HRegion server lease period in milliseconds. Default is
- 30 seconds. Clients must report in within this period else they are
- considered dead.</description>
- </property>
- <property>
- <name>hbase.server.thread.wakefrequency</name>
- <value>10000</value>
- <description>Time to sleep in between searches for work (in milliseconds).
- Used as sleep interval by service threads such as META scanner and log roller.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.handler.count</name>
- <value>10</value>
- <description>Count of RPC Server instances spun up on RegionServers
- Same property is used by the HMaster for count of master handlers.
- Default is 10.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.msginterval</name>
- <value>3000</value>
- <description>Interval between messages from the RegionServer to HMaster
- in milliseconds. Default is 15. Set this value low if you want unit
- tests to be responsive.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.maxlogentries</name>
- <value>30000</value>
- <description>Rotate the HRegion HLogs when count of entries exceeds this
- value. Default: 30,000. Value is checked by a thread that runs every
- hbase.server.thread.wakefrequency.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.optionalcacheflushinterval</name>
- <value>1800000</value>
- <description>
- Amount of time to wait since the last time a region was flushed before
- invoking an optional cache flush (An optional cache flush is a
- flush even though memcache is not at the memcache.flush.size).
- Default: 30 minutes (in miliseconds)
- </description>
- </property>
- <property>
- <name>hbase.regionserver.optionallogrollinterval</name>
- <value>1800000</value>
- <description>
- Amount of time to wait since the last time a the region server's log was
- rolled before invoking an optional log roll (An optional log roll is a
- one in which the log does not contain hbase.regionserver.maxlogentries).
- Default: 30 minutes (in miliseconds)
- </description>
- </property>
- <property>
- <name>hbase.hregion.memcache.flush.size</name>
- <value>67108864</value>
- <description>
- A HRegion memcache will be flushed to disk if size of the memcache
- exceeds this number of bytes. Value is checked by a thread that runs
- every hbase.server.thread.wakefrequency.
- </description>
- </property>
- <property>
- <name>hbase.hregion.memcache.block.multiplier</name>
- <value>2</value>
- <description>
- Block updates if memcache has hbase.hregion.block.memcache
- time hbase.hregion.flush.size bytes. Useful preventing
- runaway memcache during spikes in update traffic. Without an
- upper-bound, memcache fills such that when it flushes the
- resultant flush files take a long time to compact or split, or
- worse, we OOME.
- </description>
- </property>
- <property>
- <name>hbase.hregion.max.filesize</name>
- <value>268435456</value>
- <description>
- Maximum HStoreFile size. If any one of a column families' HStoreFiles has
- grown to exceed this value, the hosting HRegion is split in two.
- Default: 256M.
- </description>
- </property>
- <property>
- <name>hbase.hstore.compactionThreshold</name>
- <value>3</value>
- <description>
- If more than this number of HStoreFiles in any one HStore
- (one HStoreFile is written per flush of memcache) then a compaction
- is run to rewrite all HStoreFiles files as one. Larger numbers
- put off compaction but when it runs, it takes longer to complete.
- During a compaction, updates cannot be flushed to disk. Long
- compactions require memory sufficient to carry the logging of
- all updates across the duration of the compaction.
-
- If too large, clients timeout during compaction.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.thread.splitcompactcheckfrequency</name>
- <value>20000</value>
- <description>How often a region server runs the split/compaction check.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.nbreservationblocks</name>
- <value>4</value>
- <description>The number of reservation blocks which are used to prevent
- unstable region servers caused by an OOME.
- </description>
- </property>
- <property>
- <name>hbase.io.index.interval</name>
- <value>32</value>
- <description>The interval at which we record offsets in hbase
- store files/mapfiles. Default for stock mapfiles is 128. Index
- files are read into memory. If there are many of them, could prove
- a burden. If so play with the hadoop io.map.index.skip property and
- skip every nth index member when reading back the index into memory.
- </description>
- </property>
- <property>
- <name>hbase.io.seqfile.compression.type</name>
- <value>NONE</value>
- <description>The compression type for hbase sequencefile.Writers
- such as hlog.
- </description>
- </property>
- <property>
- <name>hbase.hstore.blockCache.blockSize</name>
- <value>65536</value>
- <description>The size of each block in any block caches.
- </description>
- </property>
-
- <!-- HbaseShell Configurations -->
- <property>
- <name>hbaseshell.jline.bell.enabled</name>
- <value>true</value>
- <description>
- if true, enable audible keyboard bells if an alert is required.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.globalMemcacheLimit</name>
- <value>536870912</value>
- <description>Maximum size of all memcaches in a region server before new
- updates are blocked and flushes are forced. Defaults to 512MB.
- </description>
- </property>
- <property>
- <name>hbase.regionserver.globalMemcacheLimitlowMark</name>
- <value>256435456</value>
- <description>When memcaches are being forced to flush to make room in
- memory, keep flushing until we hit this mark. Defaults to 256MB. Setting
- this value equal to hbase.regionserver.globalmemcachelimit causes the
- minimum possible flushing to occur when updates are blocked due to
- memcache limiting.
- </description>
- </property>
-</configuration>
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.master</name>
+ <value>local</value>
+ <description>The host and port that the HBase master runs at.
+ A value of 'local' runs the master and a regionserver in
+ a single process.
+ </description>
+ </property>
+ <property>
+ <name>hbase.rootdir</name>
+ <value>file:///tmp/hbase-${user.name}/hbase</value>
+ <description>The directory shared by region servers.
+ Should be fully-qualified to include the filesystem to use.
+ E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>60010</value>
+ <description>The port for the hbase master web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.bindAddress</name>
+ <value>0.0.0.0</value>
+ <description>The address for the hbase master web UI
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver</name>
+ <value>0.0.0.0:60020</value>
+ <description>The host and port a HBase region server runs at.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.dns.interface</name>
+ <value>default</value>
+ <description>Name of the network interface which a regionserver
+ should use to determine its "real" IP address. This lookup
+ prevents strings like "localhost" and "127.0.0.1" from being
+ reported back to the master.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.dns.interface</name>
+ <value>default</value>
+ <description>Name of the network interface which a regionserver
+ should use to determine its "real" IP address. This lookup
+ prevents strings like "localhost" and "127.0.0.1" from being
+ reported back to the master.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.dns.interface</name>
+ <value>default</value>
+ <description>Name of the network interface which a regionserver
+ should use to determine its "real" IP address. This lookup
+ prevents strings like "localhost" and "127.0.0.1" from being
+ reported back to the master.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>60030</value>
+ <description>The port for the hbase regionserver web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.bindAddress</name>
+ <value>0.0.0.0</value>
+ <description>The address for the hbase regionserver web UI
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.class</name>
+ <value>org.apache.hadoop.hbase.ipc.HRegionInterface</value>
+ <description>An interface that is assignable to HRegionInterface. Used in HClient for
+ opening proxy to remote region server.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.pause</name>
+ <value>10000</value>
+ <description>General client pause value. Used mostly as value to wait
+ before running a retry of a failed get, region lookup, etc.</description>
+ </property>
+ <property>
+ <name>hbase.client.retries.number</name>
+ <value>10</value>
+ <description>Maximum retries. Used as maximum for all retryable
+ operations such as fetching of the root region from root region
+ server, getting a cell's value, starting a row update, etc.
+ Default: 10.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.meta.thread.rescanfrequency</name>
+ <value>60000</value>
+ <description>How long the HMaster sleeps (in milliseconds) between scans of
+ the root and meta tables.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.period</name>
+ <value>120000</value>
+ <description>HMaster server lease period in milliseconds. Default is
+ 120 seconds. Region servers must report in within this period else
+ they are considered dead. On loaded cluster, may need to up this
+ period.</description>
+ </property>
+ <property>
+ <name>hbase.hbasemaster.maxregionopen</name>
+ <value>120000</value>
+ <description>Period to wait for a region open. If regionserver
+ takes longer than this interval, assign to a new regionserver.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.lease.period</name>
+ <value>30000</value>
+ <description>HRegion server lease period in milliseconds. Default is
+ 30 seconds. Clients must report in within this period else they are
+ considered dead.</description>
+ </property>
+ <property>
+ <name>hbase.server.thread.wakefrequency</name>
+ <value>10000</value>
+ <description>Time to sleep in between searches for work (in milliseconds).
+ Used as sleep interval by service threads such as META scanner and log roller.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>10</value>
+ <description>Count of RPC Server instances spun up on RegionServers
+ Same property is used by the HMaster for count of master handlers.
+ Default is 10.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.msginterval</name>
+ <value>3000</value>
+ <description>Interval between messages from the RegionServer to HMaster
+ in milliseconds. Default is 3 seconds. Set this value low if you want unit
+ tests to be responsive.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.maxlogentries</name>
+ <value>30000</value>
+ <description>Rotate the HRegion HLogs when count of entries exceeds this
+ value. Default: 30,000. Value is checked by a thread that runs every
+ hbase.server.thread.wakefrequency.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.optionalcacheflushinterval</name>
+ <value>1800000</value>
+ <description>
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush (An optional cache flush is a
+ flush even though memcache is not at the memcache.flush.size).
+ Default: 30 minutes (in milliseconds)
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.optionallogrollinterval</name>
+ <value>1800000</value>
+ <description>
+ Amount of time to wait since the last time the region server's log was
+ rolled before invoking an optional log roll (An optional log roll is a
+ one in which the log does not contain hbase.regionserver.maxlogentries).
+ Default: 30 minutes (in milliseconds)
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.memcache.flush.size</name>
+ <value>67108864</value>
+ <description>
+ A HRegion memcache will be flushed to disk if size of the memcache
+ exceeds this number of bytes. Value is checked by a thread that runs
+ every hbase.server.thread.wakefrequency.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.memcache.block.multiplier</name>
+ <value>2</value>
+ <description>
+ Block updates if memcache size exceeds hbase.hregion.memcache.block.multiplier
+ times hbase.hregion.memcache.flush.size bytes. Useful for preventing
+ runaway memcache during spikes in update traffic. Without an
+ upper-bound, memcache fills such that when it flushes the
+ resultant flush files take a long time to compact or split, or
+ worse, we OOME.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>268435456</value>
+ <description>
+ Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+ grown to exceed this value, the hosting HRegion is split in two.
+ Default: 256M.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compactionThreshold</name>
+ <value>3</value>
+ <description>
+ If more than this number of HStoreFiles in any one HStore
+ (one HStoreFile is written per flush of memcache) then a compaction
+ is run to rewrite all HStoreFiles files as one. Larger numbers
+ put off compaction but when it runs, it takes longer to complete.
+ During a compaction, updates cannot be flushed to disk. Long
+ compactions require memory sufficient to carry the logging of
+ all updates across the duration of the compaction.
+
+ If too large, clients timeout during compaction.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.thread.splitcompactcheckfrequency</name>
+ <value>20000</value>
+ <description>How often a region server runs the split/compaction check.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.nbreservationblocks</name>
+ <value>4</value>
+ <description>The number of reservation blocks which are used to prevent
+ unstable region servers caused by an OOME.
+ </description>
+ </property>
+ <property>
+ <name>hbase.io.index.interval</name>
+ <value>32</value>
+ <description>The interval at which we record offsets in hbase
+ store files/mapfiles. Default for stock mapfiles is 128. Index
+ files are read into memory. If there are many of them, could prove
+ a burden. If so play with the hadoop io.map.index.skip property and
+ skip every nth index member when reading back the index into memory.
+ </description>
+ </property>
+ <property>
+ <name>hbase.io.seqfile.compression.type</name>
+ <value>NONE</value>
+ <description>The compression type for hbase sequencefile.Writers
+ such as hlog.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.blockCache.blockSize</name>
+ <value>65536</value>
+ <description>The size of each block in any block caches.
+ </description>
+ </property>
+
+ <!-- HbaseShell Configurations -->
+ <property>
+ <name>hbaseshell.jline.bell.enabled</name>
+ <value>true</value>
+ <description>
+ if true, enable audible keyboard bells if an alert is required.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.globalMemcacheLimit</name>
+ <value>536870912</value>
+ <description>Maximum size of all memcaches in a region server before new
+ updates are blocked and flushes are forced. Defaults to 512MB.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.globalMemcacheLimitlowMark</name>
+ <value>256435456</value>
+ <description>When memcaches are being forced to flush to make room in
+ memory, keep flushing until we hit this mark. Defaults to 256MB. Setting
+ this value equal to hbase.regionserver.globalmemcachelimit causes the
+ minimum possible flushing to occur when updates are blocked due to
+ memcache limiting.
+ </description>
+ </property>
+</configuration>
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java?rev=680902&r1=680901&r2=680902&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java Tue Jul 29 18:45:44 2008
@@ -1,229 +1,235 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * HConstants holds a bunch of HBase-related constants
- */
-public interface HConstants {
-
- /** long constant for zero */
- static final Long ZERO_L = Long.valueOf(0L);
-
- static final String NINES = "99999999999999";
- static final String ZEROES = "00000000000000";
-
- // For migration
-
- /** name of version file */
- static final String VERSION_FILE_NAME = "hbase.version";
-
- /**
- * Current version of file system
- * Version 4 supports only one kind of bloom filter
- */
- public static final String FILE_SYSTEM_VERSION = "4";
-
- // Configuration parameters
-
- // TODO: URL for hbase master like hdfs URLs with host and port.
- // Like jdbc URLs? URLs could be used to refer to table cells?
- // jdbc:mysql://[host][,failoverhost...][:port]/[database]
- // jdbc:mysql://[host][,failoverhost...][:port]/[database][?propertyName1][=propertyValue1][&propertyName2][=propertyValue2]...
-
- // Key into HBaseConfiguration for the hbase.master address.
- // TODO: Support 'local': i.e. default of all running in single
- // process. Same for regionserver. TODO: Is having HBase homed
- // on port 60k OK?
-
- /** Parameter name for master address */
- static final String MASTER_ADDRESS = "hbase.master";
-
- /** default host address */
- static final String DEFAULT_HOST = "0.0.0.0";
-
- /** default port that the master listens on */
- static final int DEFAULT_MASTER_PORT = 60000;
-
- /** Default master address */
- static final String DEFAULT_MASTER_ADDRESS = DEFAULT_HOST + ":" +
- DEFAULT_MASTER_PORT;
-
- /** default port for master web api */
- static final int DEFAULT_MASTER_INFOPORT = 60010;
-
- /** Parameter name for hbase.regionserver address. */
- static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
-
- /** Default region server address */
- static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60020";
-
- /** default port for region server web api */
- static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;
-
- /** Parameter name for what region server interface to use. */
- static final String REGION_SERVER_CLASS = "hbase.regionserver.class";
-
- /** Parameter name for what region server implementation to use. */
- static final String REGION_SERVER_IMPL= "hbase.regionserver.impl";
-
- /** Default region server interface class name. */
- static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
-
- /** Parameter name for how often threads should wake up */
- static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
-
- /** Parameter name for HBase instance root directory */
- static final String HBASE_DIR = "hbase.rootdir";
-
- /** Used to construct the name of the log directory for a region server */
- static final String HREGION_LOGDIR_NAME = "log";
-
- /** Name of old log file for reconstruction */
- static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
-
- /** Default maximum file size */
- static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
-
- /** Default size of a reservation block */
- static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5;
-
- // Always store the location of the root table's HRegion.
- // This HRegion is never split.
-
- // region name = table + startkey + regionid. This is the row key.
- // each row in the root and meta tables describes exactly 1 region
- // Do we ever need to know all the information that we are storing?
-
- // Note that the name of the root table starts with "-" and the name of the
- // meta table starts with "." Why? it's a trick. It turns out that when we
- // store region names in memory, we use a SortedMap. Since "-" sorts before
- // "." (and since no other table name can start with either of these
- // characters, the root region will always be the first entry in such a Map,
- // followed by all the meta regions (which will be ordered by their starting
- // row key as well), followed by all user tables. So when the Master is
- // choosing regions to assign, it will always choose the root region first,
- // followed by the meta regions, followed by user regions. Since the root
- // and meta regions always need to be on-line, this ensures that they will
- // be the first to be reassigned if the server(s) they are being served by
- // should go down.
-
- /** The root table's name.*/
- static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
-
- /** The META table's name. */
- static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
-
- // Defines for the column names used in both ROOT and META HBase 'meta' tables.
-
- /** The ROOT and META column family (string) */
- static final String COLUMN_FAMILY_STR = "info:";
-
- /** The META historian column family (string) */
- static final String COLUMN_FAMILY_HISTORIAN_STR = "historian:";
-
- /** The ROOT and META column family */
- static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR);
-
- /** The META historian column family */
- static final byte [] COLUMN_FAMILY_HISTORIAN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR);
-
- /** Array of meta column names */
- static final byte[][] COLUMN_FAMILY_ARRAY = new byte[][] {COLUMN_FAMILY};
-
- /** ROOT/META column family member - contains HRegionInfo */
- static final byte [] COL_REGIONINFO =
- Bytes.toBytes(COLUMN_FAMILY_STR + "regioninfo");
-
- /** Array of column - contains HRegionInfo */
- static final byte[][] COL_REGIONINFO_ARRAY = new byte[][] {COL_REGIONINFO};
-
- /** ROOT/META column family member - contains HServerAddress.toString() */
- static final byte[] COL_SERVER = Bytes.toBytes(COLUMN_FAMILY_STR + "server");
-
- /** ROOT/META column family member - contains server start code (a long) */
- static final byte [] COL_STARTCODE =
- Bytes.toBytes(COLUMN_FAMILY_STR + "serverstartcode");
-
- /** the lower half of a split region */
- static final byte [] COL_SPLITA = Bytes.toBytes(COLUMN_FAMILY_STR + "splitA");
-
- /** the upper half of a split region */
- static final byte [] COL_SPLITB = Bytes.toBytes(COLUMN_FAMILY_STR + "splitB");
-
- /** All the columns in the catalog -ROOT- and .META. tables.
- */
- static final byte[][] ALL_META_COLUMNS = {COL_REGIONINFO, COL_SERVER,
- COL_STARTCODE, COL_SPLITA, COL_SPLITB};
-
- // Other constants
-
- /**
- * An empty instance.
- */
- static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
-
- /**
- * Used by scanners, etc when they want to start at the beginning of a region
- */
- static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
-
- /**
- * Last row in a table.
- */
- static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
-
- /**
- * Used by scanners and others when they're trying to detect the end of a
- * table
- */
- static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY;
-
- /** When we encode strings, we always specify UTF8 encoding */
- static final String UTF8_ENCODING = "UTF-8";
-
- /**
- * Timestamp to use when we want to refer to the latest cell.
- * This is the timestamp sent by clients when no timestamp is specified on
- * commit.
- */
- static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
-
- /**
- * Define for 'return-all-versions'.
- */
- static final int ALL_VERSIONS = Integer.MAX_VALUE;
-
- /**
- * Unlimited time-to-live.
- */
- static final int FOREVER = -1;
-
- public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY =
- "hbase.client.retries.number";
- public static final int DEFAULT_CLIENT_RETRIES = 5;
-
- public static final String NAME = "NAME";
- public static final String VERSIONS = "VERSIONS";
- public static final String IN_MEMORY = "IN_MEMORY";
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * HConstants holds a bunch of HBase-related constants
+ */
+public interface HConstants {
+
+ /** long constant for zero */
+ static final Long ZERO_L = Long.valueOf(0L);
+
+ static final String NINES = "99999999999999";
+ static final String ZEROES = "00000000000000";
+
+ // For migration
+
+ /** name of version file */
+ static final String VERSION_FILE_NAME = "hbase.version";
+
+ /**
+ * Current version of file system
+ * Version 4 supports only one kind of bloom filter
+ */
+ public static final String FILE_SYSTEM_VERSION = "4";
+
+ // Configuration parameters
+
+ // TODO: URL for hbase master like hdfs URLs with host and port.
+ // Like jdbc URLs? URLs could be used to refer to table cells?
+ // jdbc:mysql://[host][,failoverhost...][:port]/[database]
+ // jdbc:mysql://[host][,failoverhost...][:port]/[database][?propertyName1][=propertyValue1][&propertyName2][=propertyValue2]...
+
+ // Key into HBaseConfiguration for the hbase.master address.
+ // TODO: Support 'local': i.e. default of all running in single
+ // process. Same for regionserver. TODO: Is having HBase homed
+ // on port 60k OK?
+
+ /** Parameter name for master address */
+ static final String MASTER_ADDRESS = "hbase.master";
+
+ /** default host address */
+ static final String DEFAULT_HOST = "0.0.0.0";
+
+ /** default port that the master listens on */
+ static final int DEFAULT_MASTER_PORT = 60000;
+
+ /** Default master address */
+ static final String DEFAULT_MASTER_ADDRESS = DEFAULT_HOST + ":" +
+ DEFAULT_MASTER_PORT;
+
+ /** default port for master web api */
+ static final int DEFAULT_MASTER_INFOPORT = 60010;
+
+ /** Parameter name for hbase.regionserver address. */
+ static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
+
+ /** Default region server address */
+ static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60020";
+
+ /** default port for region server web api */
+ static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;
+
+ /** Parameter name for what region server interface to use. */
+ static final String REGION_SERVER_CLASS = "hbase.regionserver.class";
+
+ /** Parameter name for what region server implementation to use. */
+ static final String REGION_SERVER_IMPL= "hbase.regionserver.impl";
+
+ /** Default region server interface class name. */
+ static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
+
+ /** Parameter name for how often threads should wake up */
+ static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
+
+ /** Parameter name for HBase instance root directory */
+ static final String HBASE_DIR = "hbase.rootdir";
+
+ /** Used to construct the name of the log directory for a region server */
+ static final String HREGION_LOGDIR_NAME = "log";
+
+ /** Name of old log file for reconstruction */
+ static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
+
+ /** Default maximum file size */
+ static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
+
+ /** Default size of a reservation block */
+ static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5;
+
+ // Always store the location of the root table's HRegion.
+ // This HRegion is never split.
+
+ // region name = table + startkey + regionid. This is the row key.
+ // each row in the root and meta tables describes exactly 1 region
+ // Do we ever need to know all the information that we are storing?
+
+ // Note that the name of the root table starts with "-" and the name of the
+ // meta table starts with "." Why? it's a trick. It turns out that when we
+ // store region names in memory, we use a SortedMap. Since "-" sorts before
+ // "." (and since no other table name can start with either of these
+ // characters, the root region will always be the first entry in such a Map,
+ // followed by all the meta regions (which will be ordered by their starting
+ // row key as well), followed by all user tables. So when the Master is
+ // choosing regions to assign, it will always choose the root region first,
+ // followed by the meta regions, followed by user regions. Since the root
+ // and meta regions always need to be on-line, this ensures that they will
+ // be the first to be reassigned if the server(s) they are being served by
+ // should go down.
+
+ /** The root table's name.*/
+ static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
+
+ /** The META table's name. */
+ static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
+
+ // Defines for the column names used in both ROOT and META HBase 'meta' tables.
+
+ /** The ROOT and META column family (string) */
+ static final String COLUMN_FAMILY_STR = "info:";
+
+ /** The META historian column family (string) */
+ static final String COLUMN_FAMILY_HISTORIAN_STR = "historian:";
+
+ /** The ROOT and META column family */
+ static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR);
+
+ /** The META historian column family */
+ static final byte [] COLUMN_FAMILY_HISTORIAN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR);
+
+ /** Array of meta column names */
+ static final byte[][] COLUMN_FAMILY_ARRAY = new byte[][] {COLUMN_FAMILY};
+
+ /** ROOT/META column family member - contains HRegionInfo */
+ static final byte [] COL_REGIONINFO =
+ Bytes.toBytes(COLUMN_FAMILY_STR + "regioninfo");
+
+ /** Array of column - contains HRegionInfo */
+ static final byte[][] COL_REGIONINFO_ARRAY = new byte[][] {COL_REGIONINFO};
+
+ /** ROOT/META column family member - contains HServerAddress.toString() */
+ static final byte[] COL_SERVER = Bytes.toBytes(COLUMN_FAMILY_STR + "server");
+
+ /** ROOT/META column family member - contains server start code (a long) */
+ static final byte [] COL_STARTCODE =
+ Bytes.toBytes(COLUMN_FAMILY_STR + "serverstartcode");
+
+ /** the lower half of a split region */
+ static final byte [] COL_SPLITA = Bytes.toBytes(COLUMN_FAMILY_STR + "splitA");
+
+ /** the upper half of a split region */
+ static final byte [] COL_SPLITB = Bytes.toBytes(COLUMN_FAMILY_STR + "splitB");
+
+ /** All the columns in the catalog -ROOT- and .META. tables.
+ */
+ static final byte[][] ALL_META_COLUMNS = {COL_REGIONINFO, COL_SERVER,
+ COL_STARTCODE, COL_SPLITA, COL_SPLITB};
+
+ // Other constants
+
+ /**
+ * An empty instance.
+ */
+ static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
+
+ /**
+ * Used by scanners, etc when they want to start at the beginning of a region
+ */
+ static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
+
+ /**
+ * Last row in a table.
+ */
+ static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
+
+ /**
+ * Used by scanners and others when they're trying to detect the end of a
+ * table
+ */
+ static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY;
+
+ /** When we encode strings, we always specify UTF8 encoding */
+ static final String UTF8_ENCODING = "UTF-8";
+
+ /**
+ * Timestamp to use when we want to refer to the latest cell.
+ * This is the timestamp sent by clients when no timestamp is specified on
+ * commit.
+ */
+ static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
+
+ /**
+ * Define for 'return-all-versions'.
+ */
+ static final int ALL_VERSIONS = Integer.MAX_VALUE;
+
+ /**
+ * Unlimited time-to-live.
+ */
+ static final int FOREVER = -1;
+
+ public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY =
+ "hbase.client.retries.number";
+ public static final int DEFAULT_CLIENT_RETRIES = 5;
+
+ public static final String NAME = "NAME";
+ public static final String VERSIONS = "VERSIONS";
+ public static final String IN_MEMORY = "IN_MEMORY";
+
+ /**
+ * This is a retry backoff multiplier table similar to the BSD TCP syn
+ * backoff table, a bit more aggressive than simple exponential backoff.
+ */
+ public static int RETRY_BACKOFF[] = { 1, 1, 1, 1, 2, 4, 8, 16, 32, 64 };
}
\ No newline at end of file
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=680902&r1=680901&r2=680902&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Tue Jul 29 18:45:44 2008
@@ -1,619 +1,634 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.ipc.HMasterInterface;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.RemoteException;
-
-/**
- * Provides administrative functions for HBase
- */
-public class HBaseAdmin {
- private final Log LOG = LogFactory.getLog(this.getClass().getName());
- private final HConnection connection;
- private final long pause;
- private final int numRetries;
- private volatile HMasterInterface master;
-
- /**
- * Constructor
- *
- * @param conf Configuration object
- * @throws MasterNotRunningException
- */
- public HBaseAdmin(HBaseConfiguration conf) throws MasterNotRunningException {
- this.connection = HConnectionManager.getConnection(conf);
- this.pause = conf.getLong("hbase.client.pause", 30 * 1000);
- this.numRetries = conf.getInt("hbase.client.retries.number", 5);
- this.master = connection.getMaster();
- }
-
- /**
- * @return proxy connection to master server for this instance
- * @throws MasterNotRunningException
- */
- public HMasterInterface getMaster() throws MasterNotRunningException{
- return this.connection.getMaster();
- }
-
- /** @return - true if the master server is running */
- public boolean isMasterRunning() {
- return this.connection.isMasterRunning();
- }
-
- /**
- * @param tableName Table to check.
- * @return True if table exists already.
- * @throws MasterNotRunningException
- */
- public boolean tableExists(final Text tableName)
- throws MasterNotRunningException {
- return tableExists(tableName.getBytes());
- }
-
- /**
- * @param tableName Table to check.
- * @return True if table exists already.
- * @throws MasterNotRunningException
- */
- public boolean tableExists(final String tableName)
- throws MasterNotRunningException {
- return tableExists(Bytes.toBytes(tableName));
- }
-
- /**
- * @param tableName Table to check.
- * @return True if table exists already.
- * @throws MasterNotRunningException
- */
- public boolean tableExists(final byte [] tableName)
- throws MasterNotRunningException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- return connection.tableExists(tableName);
- }
-
- /**
- * List all the userspace tables. In other words, scan the META table.
- *
- * If we wanted this to be really fast, we could implement a special
- * catalog table that just contains table names and their descriptors.
- * Right now, it only exists as part of the META table's region info.
- *
- * @return - returns an array of HTableDescriptors
- * @throws IOException
- */
- public HTableDescriptor[] listTables() throws IOException {
- return this.connection.listTables();
- }
-
- /**
- * Creates a new table
- *
- * @param desc table descriptor for table
- *
- * @throws IllegalArgumentException if the table name is reserved
- * @throws MasterNotRunningException if master is not running
- * @throws TableExistsException if table already exists (If concurrent
- * threads, the table may have been created between test-for-existence
- * and attempt-at-creation).
- * @throws IOException
- */
- public void createTable(HTableDescriptor desc)
- throws IOException {
- HTableDescriptor.isLegalTableName(desc.getName());
- createTableAsync(desc);
- for (int tries = 0; tries < numRetries; tries++) {
- try {
- // Wait for new table to come on-line
- connection.locateRegion(desc.getName(), HConstants.EMPTY_START_ROW);
- break;
-
- } catch (TableNotFoundException e) {
- if (tries == numRetries - 1) {
- // Ran out of tries
- throw e;
- }
- }
- try {
- Thread.sleep(pause);
- } catch (InterruptedException e) {
- // continue
- }
- }
- }
-
- /**
- * Creates a new table but does not block and wait for it to come online.
- *
- * @param desc table descriptor for table
- *
- * @throws IllegalArgumentException Bad table name.
- * @throws MasterNotRunningException if master is not running
- * @throws TableExistsException if table already exists (If concurrent
- * threads, the table may have been created between test-for-existence
- * and attempt-at-creation).
- * @throws IOException
- */
- public void createTableAsync(HTableDescriptor desc)
- throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- HTableDescriptor.isLegalTableName(desc.getName());
- try {
- this.master.createTable(desc);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
- }
-
- /**
- * Deletes a table
- *
- * @param tableName name of table to delete
- * @throws IOException
- */
- public void deleteTable(final Text tableName) throws IOException {
- deleteTable(tableName.getBytes());
- }
-
- /**
- * Deletes a table
- *
- * @param tableName name of table to delete
- * @throws IOException
- */
- public void deleteTable(final String tableName) throws IOException {
- deleteTable(Bytes.toBytes(tableName));
- }
-
- /**
- * Deletes a table
- *
- * @param tableName name of table to delete
- * @throws IOException
- */
- public void deleteTable(final byte [] tableName) throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- HTableDescriptor.isLegalTableName(tableName);
- HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
- try {
- this.master.deleteTable(tableName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
-
- // Wait until first region is deleted
- HRegionInterface server =
- connection.getHRegionConnection(firstMetaServer.getServerAddress());
- HRegionInfo info = new HRegionInfo();
- for (int tries = 0; tries < numRetries; tries++) {
- long scannerId = -1L;
- try {
- scannerId =
- server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
- HConstants.COL_REGIONINFO_ARRAY, tableName,
- HConstants.LATEST_TIMESTAMP, null);
- RowResult values = server.next(scannerId);
- if (values == null || values.size() == 0) {
- break;
- }
- boolean found = false;
- for (Map.Entry<byte [], Cell> e: values.entrySet()) {
- if (Bytes.equals(e.getKey(), HConstants.COL_REGIONINFO)) {
- info = (HRegionInfo) Writables.getWritable(
- e.getValue().getValue(), info);
-
- if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
- found = true;
- }
- }
- }
- if (!found) {
- break;
- }
-
- } catch (IOException ex) {
- if(tries == numRetries - 1) { // no more tries left
- if (ex instanceof RemoteException) {
- ex = RemoteExceptionHandler.decodeRemoteException((RemoteException) ex);
- }
- throw ex;
- }
-
- } finally {
- if (scannerId != -1L) {
- try {
- server.close(scannerId);
- } catch (Exception ex) {
- LOG.warn(ex);
- }
- }
- }
-
- try {
- Thread.sleep(pause);
- } catch (InterruptedException e) {
- // continue
- }
- }
- LOG.info("Deleted " + Bytes.toString(tableName));
- }
-
- /**
- * Brings a table on-line (enables it)
- *
- * @param tableName name of the table
- * @throws IOException
- */
- public void enableTable(final Text tableName) throws IOException {
- enableTable(tableName.getBytes());
- }
-
- /**
- * Brings a table on-line (enables it)
- *
- * @param tableName name of the table
- * @throws IOException
- */
- public void enableTable(final String tableName) throws IOException {
- enableTable(Bytes.toBytes(tableName));
- }
-
- /**
- * Brings a table on-line (enables it)
- *
- * @param tableName name of the table
- * @throws IOException
- */
- public void enableTable(final byte [] tableName) throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- try {
- this.master.enableTable(tableName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
-
- // Wait until all regions are enabled
-
- while (!isTableEnabled(tableName)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Sleep. Waiting for all regions to be enabled from " +
- Bytes.toString(tableName));
- }
- try {
- Thread.sleep(pause);
-
- } catch (InterruptedException e) {
- // continue
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Wake. Waiting for all regions to be enabled from " +
- Bytes.toString(tableName));
- }
- }
- LOG.info("Enabled table " + Bytes.toString(tableName));
- }
-
- /**
- * Disables a table (takes it off-line) If it is being served, the master
- * will tell the servers to stop serving it.
- *
- * @param tableName name of table
- * @throws IOException
- */
- public void disableTable(final Text tableName) throws IOException {
- disableTable(tableName.getBytes());
- }
-
- /**
- * Disables a table (takes it off-line) If it is being served, the master
- * will tell the servers to stop serving it.
- *
- * @param tableName name of table
- * @throws IOException
- */
- public void disableTable(final String tableName) throws IOException {
- disableTable(Bytes.toBytes(tableName));
- }
-
- /**
- * Disables a table (takes it off-line) If it is being served, the master
- * will tell the servers to stop serving it.
- *
- * @param tableName name of table
- * @throws IOException
- */
- public void disableTable(final byte [] tableName) throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- try {
- this.master.disableTable(tableName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
-
- // Wait until all regions are disabled
- while (isTableEnabled(tableName)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Sleep. Waiting for all regions to be disabled from " +
- Bytes.toString(tableName));
- }
- try {
- Thread.sleep(pause);
- } catch (InterruptedException e) {
- // continue
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Wake. Waiting for all regions to be disabled from " +
- Bytes.toString(tableName));
- }
- }
- LOG.info("Disabled " + Bytes.toString(tableName));
- }
-
- /**
- * @param tableName name of table to check
- * @return true if table is on-line
- * @throws IOException
- */
- public boolean isTableEnabled(Text tableName) throws IOException {
- return isTableEnabled(tableName.getBytes());
- }
- /**
- * @param tableName name of table to check
- * @return true if table is on-line
- * @throws IOException
- */
- public boolean isTableEnabled(String tableName) throws IOException {
- return isTableEnabled(Bytes.toBytes(tableName));
- }
- /**
- * @param tableName name of table to check
- * @return true if table is on-line
- * @throws IOException
- */
- public boolean isTableEnabled(byte[] tableName) throws IOException {
- return connection.isTableEnabled(tableName);
- }
-
- /**
- * Add a column to an existing table
- *
- * @param tableName name of the table to add column to
- * @param column column descriptor of column to be added
- * @throws IOException
- */
- public void addColumn(final Text tableName, HColumnDescriptor column)
- throws IOException {
- addColumn(tableName.getBytes(), column);
- }
-
- /**
- * Add a column to an existing table
- *
- * @param tableName name of the table to add column to
- * @param column column descriptor of column to be added
- * @throws IOException
- */
- public void addColumn(final String tableName, HColumnDescriptor column)
- throws IOException {
- addColumn(Bytes.toBytes(tableName), column);
- }
-
- /**
- * Add a column to an existing table
- *
- * @param tableName name of the table to add column to
- * @param column column descriptor of column to be added
- * @throws IOException
- */
- public void addColumn(final byte [] tableName, HColumnDescriptor column)
- throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- HTableDescriptor.isLegalTableName(tableName);
- try {
- this.master.addColumn(tableName, column);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
- }
-
- /**
- * Delete a column from a table
- *
- * @param tableName name of table
- * @param columnName name of column to be deleted
- * @throws IOException
- */
- public void deleteColumn(final Text tableName, final Text columnName)
- throws IOException {
- deleteColumn(tableName.getBytes(), columnName.getBytes());
- }
-
- /**
- * Delete a column from a table
- *
- * @param tableName name of table
- * @param columnName name of column to be deleted
- * @throws IOException
- */
- public void deleteColumn(final String tableName, final String columnName)
- throws IOException {
- deleteColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName));
- }
-
- /**
- * Delete a column from a table
- *
- * @param tableName name of table
- * @param columnName name of column to be deleted
- * @throws IOException
- */
- public void deleteColumn(final byte [] tableName, final byte [] columnName)
- throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- HTableDescriptor.isLegalTableName(tableName);
- try {
- this.master.deleteColumn(tableName, columnName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
- }
-
- /**
- * Modify an existing column family on a table
- *
- * @param tableName name of table
- * @param columnName name of column to be modified
- * @param descriptor new column descriptor to use
- * @throws IOException
- */
- public void modifyColumn(final Text tableName, final Text columnName,
- HColumnDescriptor descriptor)
- throws IOException {
- modifyColumn(tableName.getBytes(), columnName.getBytes(), descriptor);
- }
-
- /**
- * Modify an existing column family on a table
- *
- * @param tableName name of table
- * @param columnName name of column to be modified
- * @param descriptor new column descriptor to use
- * @throws IOException
- */
- public void modifyColumn(final String tableName, final String columnName,
- HColumnDescriptor descriptor)
- throws IOException {
- modifyColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName),
- descriptor);
- }
-
- /**
- * Modify an existing column family on a table
- *
- * @param tableName name of table
- * @param columnName name of column to be modified
- * @param descriptor new column descriptor to use
- * @throws IOException
- */
- public void modifyColumn(final byte [] tableName, final byte [] columnName,
- HColumnDescriptor descriptor)
- throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- HTableDescriptor.isLegalTableName(tableName);
- try {
- this.master.modifyColumn(tableName, columnName, descriptor);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
- }
-
- /**
- * Modify a table's HTableDescriptor
- *
- * @param tableName name of table
- * @param desc the updated descriptor
- * @throws IOException
- */
- public void modifyTableMeta(final byte [] tableName, HTableDescriptor desc)
- throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- HTableDescriptor.isLegalTableName(tableName);
- try {
- this.master.modifyTableMeta(tableName, desc);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
- }
-
- /**
- * Shuts down the HBase instance
- * @throws IOException
- */
- public synchronized void shutdown() throws IOException {
- if (this.master == null) {
- throw new MasterNotRunningException("master has been shut down");
- }
- try {
- this.master.shutdown();
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- } finally {
- this.master = null;
- }
- }
-
- private HRegionLocation getFirstMetaServerForTable(final byte [] tableName)
- throws IOException {
- return connection.locateRegion(HConstants.META_TABLE_NAME,
- HRegionInfo.createRegionName(tableName, null, HConstants.NINES));
- }
-
- /**
- * Check to see if HBase is running. Throw an exception if not.
- *
- * @param conf
- * @throws MasterNotRunningException
- */
- public static void checkHBaseAvailable(HBaseConfiguration conf)
- throws MasterNotRunningException {
- HBaseConfiguration copyOfConf = new HBaseConfiguration(conf);
- copyOfConf.setInt("hbase.client.retries.number", 1);
- new HBaseAdmin(copyOfConf);
- }
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.ipc.HMasterInterface;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
+
+/**
+ * Provides administrative functions for HBase
+ */
+public class HBaseAdmin {
+ private final Log LOG = LogFactory.getLog(this.getClass().getName());
+ private final HConnection connection;
+ private final long pause;
+ private final int numRetries;
+ private volatile HMasterInterface master;
+
+ /**
+ * Constructor
+ *
+ * @param conf Configuration object
+ * @throws MasterNotRunningException
+ */
+ public HBaseAdmin(HBaseConfiguration conf) throws MasterNotRunningException {
+ this.connection = HConnectionManager.getConnection(conf);
+ this.pause = conf.getLong("hbase.client.pause", 30 * 1000);
+ this.numRetries = conf.getInt("hbase.client.retries.number", 5);
+ this.master = connection.getMaster();
+ }
+
+ /**
+ * @return proxy connection to master server for this instance
+ * @throws MasterNotRunningException
+ */
+ public HMasterInterface getMaster() throws MasterNotRunningException{
+ return this.connection.getMaster();
+ }
+
+ /** @return - true if the master server is running */
+ public boolean isMasterRunning() {
+ return this.connection.isMasterRunning();
+ }
+
+ /**
+ * @param tableName Table to check.
+ * @return True if table exists already.
+ * @throws MasterNotRunningException
+ */
+ public boolean tableExists(final Text tableName)
+ throws MasterNotRunningException {
+ return tableExists(tableName.getBytes());
+ }
+
+ /**
+ * @param tableName Table to check.
+ * @return True if table exists already.
+ * @throws MasterNotRunningException
+ */
+ public boolean tableExists(final String tableName)
+ throws MasterNotRunningException {
+ return tableExists(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * @param tableName Table to check.
+ * @return True if table exists already.
+ * @throws MasterNotRunningException
+ */
+ public boolean tableExists(final byte [] tableName)
+ throws MasterNotRunningException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ return connection.tableExists(tableName);
+ }
+
+ /**
+ * List all the userspace tables. In other words, scan the META table.
+ *
+ * If we wanted this to be really fast, we could implement a special
+ * catalog table that just contains table names and their descriptors.
+ * Right now, it only exists as part of the META table's region info.
+ *
+ * @return - returns an array of HTableDescriptors
+ * @throws IOException
+ */
+ public HTableDescriptor[] listTables() throws IOException {
+ return this.connection.listTables();
+ }
+
+ private long getPauseTime(int tries) {
+ if (tries >= HConstants.RETRY_BACKOFF.length)
+ tries = HConstants.RETRY_BACKOFF.length - 1;
+ return this.pause * HConstants.RETRY_BACKOFF[tries];
+ }
+
+ /**
+ * Creates a new table
+ *
+ * @param desc table descriptor for table
+ *
+ * @throws IllegalArgumentException if the table name is reserved
+ * @throws MasterNotRunningException if master is not running
+ * @throws TableExistsException if table already exists (If concurrent
+ * threads, the table may have been created between test-for-existence
+ * and attempt-at-creation).
+ * @throws IOException
+ */
+ public void createTable(HTableDescriptor desc)
+ throws IOException {
+ HTableDescriptor.isLegalTableName(desc.getName());
+ createTableAsync(desc);
+ for (int tries = 0; tries < numRetries; tries++) {
+ try {
+ // Wait for new table to come on-line
+ connection.locateRegion(desc.getName(), HConstants.EMPTY_START_ROW);
+ break;
+
+ } catch (TableNotFoundException e) {
+ if (tries == numRetries - 1) {
+ // Ran out of tries
+ throw e;
+ }
+ }
+ try {
+ Thread.sleep(getPauseTime(tries));
+ } catch (InterruptedException e) {
+ // continue
+ }
+ }
+ }
+
+ /**
+ * Creates a new table but does not block and wait for it to come online.
+ *
+ * @param desc table descriptor for table
+ *
+ * @throws IllegalArgumentException Bad table name.
+ * @throws MasterNotRunningException if master is not running
+ * @throws TableExistsException if table already exists (If concurrent
+ * threads, the table may have been created between test-for-existence
+ * and attempt-at-creation).
+ * @throws IOException
+ */
+ public void createTableAsync(HTableDescriptor desc)
+ throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ HTableDescriptor.isLegalTableName(desc.getName());
+ try {
+ this.master.createTable(desc);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Deletes a table
+ *
+ * @param tableName name of table to delete
+ * @throws IOException
+ */
+ public void deleteTable(final Text tableName) throws IOException {
+ deleteTable(tableName.getBytes());
+ }
+
+ /**
+ * Deletes a table
+ *
+ * @param tableName name of table to delete
+ * @throws IOException
+ */
+ public void deleteTable(final String tableName) throws IOException {
+ deleteTable(Bytes.toBytes(tableName));
+ }
+
  /**
   * Deletes a table. Asks the master to drop the table, then polls the
   * .META. region that holds the table's rows until no region of the table
   * remains, sleeping with exponential backoff between polls.
   *
   * @param tableName name of table to delete
   * @throws IOException
   */
  public void deleteTable(final byte [] tableName) throws IOException {
    if (this.master == null) {
      throw new MasterNotRunningException("master has been shut down");
    }
    HTableDescriptor.isLegalTableName(tableName);
    // Resolve the .META. region holding this table's rows BEFORE the delete.
    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
    try {
      this.master.deleteTable(tableName);
    } catch (RemoteException e) {
      throw RemoteExceptionHandler.decodeRemoteException(e);
    }

    // Wait until first region is deleted
    HRegionInterface server =
      connection.getHRegionConnection(firstMetaServer.getServerAddress());
    HRegionInfo info = new HRegionInfo();
    for (int tries = 0; tries < numRetries; tries++) {
      long scannerId = -1L;
      try {
        // Scan .META. starting at the table's first region row.
        scannerId =
          server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
            HConstants.COL_REGIONINFO_ARRAY, tableName,
            HConstants.LATEST_TIMESTAMP, null);
        RowResult values = server.next(scannerId);
        if (values == null || values.size() == 0) {
          // Nothing at or after the start row: table rows are gone.
          break;
        }
        boolean found = false;
        for (Map.Entry<byte [], Cell> e: values.entrySet()) {
          if (Bytes.equals(e.getKey(), HConstants.COL_REGIONINFO)) {
            // Deserialize the region info; 'info' is reused across iterations.
            info = (HRegionInfo) Writables.getWritable(
              e.getValue().getValue(), info);

            // The scan start row may land on another table's region; only a
            // region whose descriptor names THIS table counts as "still there".
            if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
              found = true;
            }
          }
        }
        if (!found) {
          break;
        }

      } catch (IOException ex) {
        if(tries == numRetries - 1) { // no more tries left
          if (ex instanceof RemoteException) {
            ex = RemoteExceptionHandler.decodeRemoteException((RemoteException) ex);
          }
          throw ex;
        }
        // Otherwise swallow and retry after the backoff sleep below.

      } finally {
        // Always release the scanner lease, even on error; a failure to
        // close is only logged so it cannot mask the primary exception.
        if (scannerId != -1L) {
          try {
            server.close(scannerId);
          } catch (Exception ex) {
            LOG.warn(ex);
          }
        }
      }

      try {
        Thread.sleep(getPauseTime(tries));
      } catch (InterruptedException e) {
        // continue
      }
    }
    // NOTE(review): logged even if retries were exhausted with regions still
    // present — confirm callers do not rely on this as a success guarantee.
    LOG.info("Deleted " + Bytes.toString(tableName));
  }
+
+ /**
+ * Brings a table on-line (enables it)
+ *
+ * @param tableName name of the table
+ * @throws IOException
+ */
+ public void enableTable(final Text tableName) throws IOException {
+ enableTable(tableName.getBytes());
+ }
+
+ /**
+ * Brings a table on-line (enables it)
+ *
+ * @param tableName name of the table
+ * @throws IOException
+ */
+ public void enableTable(final String tableName) throws IOException {
+ enableTable(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * Brings a table on-line (enables it)
+ *
+ * @param tableName name of the table
+ * @throws IOException
+ */
+ public void enableTable(final byte [] tableName) throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ try {
+ this.master.enableTable(tableName);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+
+ // Wait until all regions are enabled
+
+ for (int tries = 0;
+ (tries < numRetries) && (!isTableEnabled(tableName));
+ tries++) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Sleep. Waiting for all regions to be enabled from " +
+ Bytes.toString(tableName));
+ }
+ try {
+ Thread.sleep(getPauseTime(tries));
+ } catch (InterruptedException e) {
+ // continue
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Wake. Waiting for all regions to be enabled from " +
+ Bytes.toString(tableName));
+ }
+ }
+ if (!isTableEnabled(tableName))
+ throw new IOException("unable to enable table " +
+ Bytes.toString(tableName));
+ LOG.info("Enabled table " + Bytes.toString(tableName));
+ }
+
+ /**
+ * Disables a table (takes it off-line) If it is being served, the master
+ * will tell the servers to stop serving it.
+ *
+ * @param tableName name of table
+ * @throws IOException
+ */
+ public void disableTable(final Text tableName) throws IOException {
+ disableTable(tableName.getBytes());
+ }
+
+ /**
+ * Disables a table (takes it off-line) If it is being served, the master
+ * will tell the servers to stop serving it.
+ *
+ * @param tableName name of table
+ * @throws IOException
+ */
+ public void disableTable(final String tableName) throws IOException {
+ disableTable(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * Disables a table (takes it off-line) If it is being served, the master
+ * will tell the servers to stop serving it.
+ *
+ * @param tableName name of table
+ * @throws IOException
+ */
+ public void disableTable(final byte [] tableName) throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ try {
+ this.master.disableTable(tableName);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+
+ // Wait until all regions are disabled
+ for (int tries = 0;
+ (tries < numRetries) && (isTableEnabled(tableName));
+ tries++) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Sleep. Waiting for all regions to be disabled from " +
+ Bytes.toString(tableName));
+ }
+ try {
+ Thread.sleep(getPauseTime(tries));
+ } catch (InterruptedException e) {
+ // continue
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Wake. Waiting for all regions to be disabled from " +
+ Bytes.toString(tableName));
+ }
+ }
+ if (isTableEnabled(tableName))
+ throw new IOException("unable to disable table " +
+ Bytes.toString(tableName));
+ LOG.info("Disabled " + Bytes.toString(tableName));
+ }
+
+ /**
+ * @param tableName name of table to check
+ * @return true if table is on-line
+ * @throws IOException
+ */
+ public boolean isTableEnabled(Text tableName) throws IOException {
+ return isTableEnabled(tableName.getBytes());
+ }
+ /**
+ * @param tableName name of table to check
+ * @return true if table is on-line
+ * @throws IOException
+ */
+ public boolean isTableEnabled(String tableName) throws IOException {
+ return isTableEnabled(Bytes.toBytes(tableName));
+ }
+ /**
+ * @param tableName name of table to check
+ * @return true if table is on-line
+ * @throws IOException
+ */
+ public boolean isTableEnabled(byte[] tableName) throws IOException {
+ return connection.isTableEnabled(tableName);
+ }
+
+ /**
+ * Add a column to an existing table
+ *
+ * @param tableName name of the table to add column to
+ * @param column column descriptor of column to be added
+ * @throws IOException
+ */
+ public void addColumn(final Text tableName, HColumnDescriptor column)
+ throws IOException {
+ addColumn(tableName.getBytes(), column);
+ }
+
+ /**
+ * Add a column to an existing table
+ *
+ * @param tableName name of the table to add column to
+ * @param column column descriptor of column to be added
+ * @throws IOException
+ */
+ public void addColumn(final String tableName, HColumnDescriptor column)
+ throws IOException {
+ addColumn(Bytes.toBytes(tableName), column);
+ }
+
+ /**
+ * Add a column to an existing table
+ *
+ * @param tableName name of the table to add column to
+ * @param column column descriptor of column to be added
+ * @throws IOException
+ */
+ public void addColumn(final byte [] tableName, HColumnDescriptor column)
+ throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ HTableDescriptor.isLegalTableName(tableName);
+ try {
+ this.master.addColumn(tableName, column);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Delete a column from a table
+ *
+ * @param tableName name of table
+ * @param columnName name of column to be deleted
+ * @throws IOException
+ */
+ public void deleteColumn(final Text tableName, final Text columnName)
+ throws IOException {
+ deleteColumn(tableName.getBytes(), columnName.getBytes());
+ }
+
+ /**
+ * Delete a column from a table
+ *
+ * @param tableName name of table
+ * @param columnName name of column to be deleted
+ * @throws IOException
+ */
+ public void deleteColumn(final String tableName, final String columnName)
+ throws IOException {
+ deleteColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName));
+ }
+
+ /**
+ * Delete a column from a table
+ *
+ * @param tableName name of table
+ * @param columnName name of column to be deleted
+ * @throws IOException
+ */
+ public void deleteColumn(final byte [] tableName, final byte [] columnName)
+ throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ HTableDescriptor.isLegalTableName(tableName);
+ try {
+ this.master.deleteColumn(tableName, columnName);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Modify an existing column family on a table
+ *
+ * @param tableName name of table
+ * @param columnName name of column to be modified
+ * @param descriptor new column descriptor to use
+ * @throws IOException
+ */
+ public void modifyColumn(final Text tableName, final Text columnName,
+ HColumnDescriptor descriptor)
+ throws IOException {
+ modifyColumn(tableName.getBytes(), columnName.getBytes(), descriptor);
+ }
+
+ /**
+ * Modify an existing column family on a table
+ *
+ * @param tableName name of table
+ * @param columnName name of column to be modified
+ * @param descriptor new column descriptor to use
+ * @throws IOException
+ */
+ public void modifyColumn(final String tableName, final String columnName,
+ HColumnDescriptor descriptor)
+ throws IOException {
+ modifyColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName),
+ descriptor);
+ }
+
+ /**
+ * Modify an existing column family on a table
+ *
+ * @param tableName name of table
+ * @param columnName name of column to be modified
+ * @param descriptor new column descriptor to use
+ * @throws IOException
+ */
+ public void modifyColumn(final byte [] tableName, final byte [] columnName,
+ HColumnDescriptor descriptor)
+ throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ HTableDescriptor.isLegalTableName(tableName);
+ try {
+ this.master.modifyColumn(tableName, columnName, descriptor);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Modify a table's HTableDescriptor
+ *
+ * @param tableName name of table
+ * @param desc the updated descriptor
+ * @throws IOException
+ */
+ public void modifyTableMeta(final byte [] tableName, HTableDescriptor desc)
+ throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ HTableDescriptor.isLegalTableName(tableName);
+ try {
+ this.master.modifyTableMeta(tableName, desc);
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Shuts down the HBase instance
+ * @throws IOException
+ */
+ public synchronized void shutdown() throws IOException {
+ if (this.master == null) {
+ throw new MasterNotRunningException("master has been shut down");
+ }
+ try {
+ this.master.shutdown();
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ } finally {
+ this.master = null;
+ }
+ }
+
+ private HRegionLocation getFirstMetaServerForTable(final byte [] tableName)
+ throws IOException {
+ return connection.locateRegion(HConstants.META_TABLE_NAME,
+ HRegionInfo.createRegionName(tableName, null, HConstants.NINES));
+ }
+
+ /**
+ * Check to see if HBase is running. Throw an exception if not.
+ *
+ * @param conf
+ * @throws MasterNotRunningException
+ */
+ public static void checkHBaseAvailable(HBaseConfiguration conf)
+ throws MasterNotRunningException {
+ HBaseConfiguration copyOfConf = new HBaseConfiguration(conf);
+ copyOfConf.setInt("hbase.client.retries.number", 1);
+ new HBaseAdmin(copyOfConf);
+ }
}
\ No newline at end of file