Posted to commits@hbase.apache.org by jm...@apache.org on 2013/02/21 04:38:08 UTC
svn commit: r1448506 [4/5] - in /hbase/trunk: ./
hbase-common/src/main/java/org/apache/hadoop/hbase/
hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/
hbase-protocol/src/main/protobuf/ hbase-server/src/main/avro/
hbase-server/src...
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java Thu Feb 21 03:38:05 2013
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.coprocessor.*;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import java.io.IOException;
import java.util.List;
@@ -160,6 +161,7 @@ public class MasterCoprocessorHost
}
}
}
+
public void preDeleteTable(byte[] tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
@@ -935,4 +937,146 @@ public class MasterCoprocessorHost
}
}
}
+
+ public void preSnapshot(final SnapshotDescription snapshot,
+ final HTableDescriptor hTableDescriptor) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).preSnapshot(ctx, snapshot, hTableDescriptor);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
+ public void postSnapshot(final SnapshotDescription snapshot,
+ final HTableDescriptor hTableDescriptor) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).postSnapshot(ctx, snapshot, hTableDescriptor);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
+ public void preCloneSnapshot(final SnapshotDescription snapshot,
+ final HTableDescriptor hTableDescriptor) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).preCloneSnapshot(ctx, snapshot, hTableDescriptor);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
+ public void postCloneSnapshot(final SnapshotDescription snapshot,
+ final HTableDescriptor hTableDescriptor) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).postCloneSnapshot(ctx, snapshot, hTableDescriptor);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
+ public void preRestoreSnapshot(final SnapshotDescription snapshot,
+ final HTableDescriptor hTableDescriptor) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).preRestoreSnapshot(ctx, snapshot, hTableDescriptor);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
+ public void postRestoreSnapshot(final SnapshotDescription snapshot,
+ final HTableDescriptor hTableDescriptor) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).postRestoreSnapshot(ctx, snapshot, hTableDescriptor);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
+ public void preDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).preDeleteSnapshot(ctx, snapshot);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
+
+ public void postDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
+ ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+ for (MasterEnvironment env: coprocessors) {
+ if (env.getInstance() instanceof MasterObserver) {
+ ctx = ObserverContext.createAndPrepare(env, ctx);
+ try {
+ ((MasterObserver)env.getInstance()).postDeleteSnapshot(ctx, snapshot);
+ } catch (Throwable e) {
+ handleCoprocessorThrowable(env, e);
+ }
+ if (ctx.shouldComplete()) {
+ break;
+ }
+ }
+ }
+ }
}
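
The new host methods fan each snapshot lifecycle event out to every loaded MasterObserver in priority order, stopping early once a hook marks its context complete. Below is a minimal sketch of an observer consuming these hooks; the class name and the "prod-" policy are illustrative, and it assumes BaseMasterObserver gains no-op defaults for the new methods elsewhere in this commit:

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

// Hypothetical observer: logs snapshot creation and vetoes deletion of "prod-" snapshots.
public class SnapshotAuditObserver extends BaseMasterObserver {
  private static final Log LOG = LogFactory.getLog(SnapshotAuditObserver.class);

  @Override
  public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
      throws IOException {
    LOG.info("Snapshot " + snapshot.getName() + " of table " +
        hTableDescriptor.getNameAsString() + " requested");
  }

  @Override
  public void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      final SnapshotDescription snapshot) throws IOException {
    // Throwing here aborts the operation for the client, as in AccessController below.
    if (snapshot.getName().startsWith("prod-")) {
      throw new IOException("Deleting production snapshots is not allowed");
    }
  }
}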
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java Thu Feb 21 03:38:05 2013
@@ -79,6 +79,8 @@ public class MasterFileSystem {
private final Path oldLogDir;
// root hbase directory on the FS
private final Path rootdir;
+ // hbase temp directory used for table construction and deletion
+ private final Path tempdir;
// create the split log lock
final Lock splitLogLock = new ReentrantLock();
final boolean distributedLogSplitting;
@@ -109,6 +111,7 @@ public class MasterFileSystem {
// default localfs. Presumption is that rootdir is fully-qualified before
// we get to here with appropriate fs scheme.
this.rootdir = FSUtils.getRootDir(conf);
+ this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
this.fs = this.rootdir.getFileSystem(conf);
@@ -146,6 +149,9 @@ public class MasterFileSystem {
// check if the root directory exists
checkRootDir(this.rootdir, conf, this.fs);
+ // check if temp directory exists and clean it
+ checkTempDir(this.tempdir, conf, this.fs);
+
Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);
// Make sure the region servers can archive their old logs
@@ -194,6 +200,13 @@ public class MasterFileSystem {
}
/**
+ * @return HBase temp dir.
+ */
+ public Path getTempDir() {
+ return this.tempdir;
+ }
+
+ /**
* @return The unique identifier generated for this cluster
*/
public ClusterId getClusterId() {
@@ -439,6 +452,32 @@ public class MasterFileSystem {
return rd;
}
+ /**
+ * Make sure the hbase temp directory exists and is empty.
+ * NOTE that this method is only executed once just after the master becomes the active one.
+ */
+ private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
+ throws IOException {
+ // If the temp directory exists, clear the content (left over, from the previous run)
+ if (fs.exists(tmpdir)) {
+ // Archive tables in temp, maybe left over from a failed deletion;
+ // if not, the cleaner will take care of them.
+ for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
+ for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
+ HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
+ }
+ }
+ if (!fs.delete(tmpdir, true)) {
+ throw new IOException("Unable to clean the temp directory: " + tmpdir);
+ }
+ }
+
+ // Create the temp directory
+ if (!fs.mkdirs(tmpdir)) {
+ throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
+ }
+ }
+
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
LOG.info("BOOTSTRAP: creating ROOT and first META regions");
@@ -503,6 +542,37 @@ public class MasterFileSystem {
fs.delete(new Path(rootdir, Bytes.toString(tableName)), true);
}
+ /**
+ * Move the specified file/directory to the hbase temp directory.
+ * @param path The path of the file/directory to move
+ * @return The temp location of the file/directory moved
+ * @throws IOException in case of file-system failure
+ */
+ public Path moveToTemp(final Path path) throws IOException {
+ Path tempPath = new Path(this.tempdir, path.getName());
+
+ // Ensure temp exists
+ if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ if (!fs.rename(path, tempPath)) {
+ throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
+ }
+
+ return tempPath;
+ }
+
+ /**
+ * Move the specified table to the hbase temp directory
+ * @param tableName Table name to move
+ * @return The temp location of the table moved
+ * @throws IOException in case of file-system failure
+ */
+ public Path moveTableToTemp(byte[] tableName) throws IOException {
+ return moveToTemp(HTableDescriptor.getTableDir(this.rootdir, tableName));
+ }
+
public void updateRegionInfo(HRegionInfo region) {
// TODO implement this. i think this is currently broken in trunk i don't
// see this getting updated.
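
moveToTemp hinges on FileSystem.rename being a single metadata operation: the table disappears from the root directory in one step instead of file by file, so readers never observe a half-deleted table. A standalone sketch of the same move-then-delete pattern, with illustrative paths (the real temp name comes from HConstants.HBASE_TEMP_DIRECTORY):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MoveToTempExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path rootdir = new Path("/hbase");          // illustrative root
    Path tempdir = new Path(rootdir, ".tmp");   // stands in for HConstants.HBASE_TEMP_DIRECTORY
    Path tabledir = new Path(rootdir, "mytable");

    // Ensure temp exists before renaming into it, as moveToTemp does.
    if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
      throw new IOException("Could not create " + tempdir);
    }

    // One metadata operation: the table vanishes from the root dir atomically,
    // so no reader ever sees a partially deleted table.
    Path tempTableDir = new Path(tempdir, tabledir.getName());
    if (!fs.rename(tabledir, tempTableDir)) {
      throw new IOException("Unable to move " + tabledir + " to " + tempTableDir);
    }

    // Slow, file-by-file removal now happens out of the visible namespace.
    fs.delete(tempTableDir, true);
  }
}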
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java Thu Feb 21 03:38:05 2013
@@ -56,6 +56,11 @@ public interface MasterServices extends
public ExecutorService getExecutorService();
/**
+ * @return Master's instance of {@link MasterCoprocessorHost}
+ */
+ public MasterCoprocessorHost getCoprocessorHost();
+
+ /**
* Check table is modifiable; i.e. exists and is offline.
* @param tableName Name of table to check.
* @throws TableNotDisabledException
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java Thu Feb 21 03:38:05 2013
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.h
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
@@ -36,6 +35,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
@@ -60,9 +61,9 @@ import org.apache.zookeeper.KeeperExcept
@InterfaceAudience.Private
public class CreateTableHandler extends EventHandler {
private static final Log LOG = LogFactory.getLog(CreateTableHandler.class);
- private MasterFileSystem fileSystemManager;
- private final HTableDescriptor hTableDescriptor;
- private Configuration conf;
+ protected final MasterFileSystem fileSystemManager;
+ protected final HTableDescriptor hTableDescriptor;
+ protected final Configuration conf;
private final AssignmentManager assignmentManager;
private final CatalogTracker catalogTracker;
private final HRegionInfo [] newRegions;
@@ -88,7 +89,9 @@ public class CreateTableHandler extends
}
} catch (InterruptedException e) {
LOG.warn("Interrupted waiting for meta availability", e);
- throw new IOException(e);
+ InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+ ie.initCause(e);
+ throw ie;
}
String tableName = this.hTableDescriptor.getNameAsString();
@@ -133,28 +136,95 @@ public class CreateTableHandler extends
cpHost.preCreateTableHandler(this.hTableDescriptor, this.newRegions);
}
handleCreateTable(tableName);
+ completed(null);
if (cpHost != null) {
cpHost.postCreateTableHandler(this.hTableDescriptor, this.newRegions);
}
- } catch (IOException e) {
+ } catch (Throwable e) {
LOG.error("Error trying to create the table " + tableName, e);
+ completed(e);
+ }
+ }
+
+ /**
+ * Called after the process() call has completed.
+ * @param exception null if process() was successful, non-null if something failed.
+ */
+ protected void completed(final Throwable exception) {
+ }
+
+ /**
+ * Responsible for table creation (on-disk and in META) and assignment.
+ * - Create the table directory and descriptor (temp folder)
+ * - Create the on-disk regions (temp folder)
+ * [If something fails here: we just have some trash in temp]
+ * - Move the table from temp to the root directory
+ * [If something fails here: we have the table in place but some of the required
+ * rows are not present in META. (hbck needed)]
+ * - Add regions to META
+ * [If something fails here: we don't have regions assigned: table disabled]
+ * - Assign regions to Region Servers
+ * [If something fails here: we still have the table in disabled state]
+ * - Update ZooKeeper with the enabled state
+ */
+ private void handleCreateTable(String tableName) throws IOException, KeeperException {
+ Path tempdir = fileSystemManager.getTempDir();
+ FileSystem fs = fileSystemManager.getFileSystem();
+
+ // 1. Create Table Descriptor
+ FSTableDescriptors.createTableDescriptor(fs, tempdir, this.hTableDescriptor);
+ Path tempTableDir = new Path(tempdir, tableName);
+ Path tableDir = new Path(fileSystemManager.getRootDir(), tableName);
+
+ // 2. Create Regions
+ List<HRegionInfo> regionInfos = handleCreateHdfsRegions(tempdir, tableName);
+
+ // 3. Move Table temp directory to the hbase root location
+ if (!fs.rename(tempTableDir, tableDir)) {
+ throw new IOException("Unable to move table from temp=" + tempTableDir +
+ " to hbase root=" + tableDir);
+ }
+
+ if (regionInfos != null && regionInfos.size() > 0) {
+ // 4. Add regions to META
+ MetaEditor.addRegionsToMeta(this.catalogTracker, regionInfos);
+
+ // 5. Trigger immediate assignment of the regions in round-robin fashion
+ try {
+ assignmentManager.getRegionStates().createRegionStates(regionInfos);
+ assignmentManager.assign(regionInfos);
+ } catch (InterruptedException e) {
+ LOG.error("Caught " + e + " during round-robin assignment");
+ InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+ ie.initCause(e);
+ throw ie;
+ }
+ }
+
+ // 6. Set table enabled flag up in zk.
+ try {
+ assignmentManager.getZKTable().setEnabledTable(tableName);
} catch (KeeperException e) {
- LOG.error("Error trying to create the table " + tableName, e);
+ throw new IOException("Unable to ensure that " + tableName + " will be" +
+ " enabled because of a ZooKeeper issue", e);
}
}
- private void handleCreateTable(String tableName) throws IOException,
- KeeperException {
+ /**
+ * Create the on-disk structure for the table and return the region info.
+ * @param tableRootDir directory where the table is being created
+ * @param tableName name of the table under construction
+ * @return the list of regions created
+ */
+ protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
+ final String tableName)
+ throws IOException {
int regionNumber = newRegions.length;
ThreadPoolExecutor regionOpenAndInitThreadPool = getRegionOpenAndInitThreadPool(
"RegionOpenAndInitThread-" + tableName, regionNumber);
CompletionService<HRegion> completionService = new ExecutorCompletionService<HRegion>(
regionOpenAndInitThreadPool);
- // TODO: Currently we make the table descriptor and as side-effect the
- // tableDir is created. Should we change below method to be createTable
- // where we create table in tmp dir with its table descriptor file and then
- // do rename to move it into place?
- FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf);
+
List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
for (final HRegionInfo newRegion : newRegions) {
completionService.submit(new Callable<HRegion>() {
@@ -162,9 +232,8 @@ public class CreateTableHandler extends
// 1. Create HRegion
HRegion region = HRegion.createHRegion(newRegion,
- fileSystemManager.getRootDir(), conf, hTableDescriptor, null,
+ tableRootDir, conf, hTableDescriptor, null,
false, true);
-
// 2. Close the new region to flush to disk. Close log file too.
region.close();
return region;
@@ -185,28 +254,8 @@ public class CreateTableHandler extends
} finally {
regionOpenAndInitThreadPool.shutdownNow();
}
- if (regionInfos.size() > 0) {
- MetaEditor.addRegionsToMeta(this.catalogTracker, regionInfos);
- }
- // 4. Trigger immediate assignment of the regions in round-robin fashion
- try {
- List<HRegionInfo> regions = Arrays.asList(newRegions);
- assignmentManager.getRegionStates().createRegionStates(regions);
- assignmentManager.assign(regions);
- } catch (InterruptedException ie) {
- LOG.error("Caught " + ie + " during round-robin assignment");
- throw new IOException(ie);
- }
-
- // 5. Set table enabled flag up in zk.
- try {
- assignmentManager.getZKTable().
- setEnabledTable(this.hTableDescriptor.getNameAsString());
- } catch (KeeperException e) {
- throw new IOException("Unable to ensure that the table will be" +
- " enabled because of a ZooKeeper issue", e);
- }
+ return regionInfos;
}
protected ThreadPoolExecutor getRegionOpenAndInitThreadPool(
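
The refactoring turns CreateTableHandler into a template method: handleCreateTable keeps the shared steps (descriptor in temp, rename into place, META update, assignment, ZK enable), while subclasses override handleCreateHdfsRegions to materialize regions differently and completed to observe the outcome. A self-contained sketch of that shape with hypothetical class names, not the real handler hierarchy:

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

// Hypothetical classes mirroring the shape of the refactored handler.
abstract class TableCreationFlow {
  public final void process(String tableName) {
    try {
      // Subclass-specific step: how the on-disk regions come into existence.
      List<String> regions = createRegions(tableName);
      // Shared steps would follow here (META update, assignment, ZK enable).
      System.out.println("META now holds " + regions);
      completed(null);
    } catch (Throwable t) {
      completed(t);
    }
  }

  protected abstract List<String> createRegions(String tableName) throws IOException;

  // Called once process() finishes; null on success, the failure otherwise.
  // Default is a no-op, as in the diff above.
  protected void completed(Throwable exception) {
  }
}

// A clone-style subclass: regions come from somewhere else (here, canned names).
class CloneFlow extends TableCreationFlow {
  @Override
  protected List<String> createRegions(String tableName) {
    return Arrays.asList(tableName + "-region-0", tableName + "-region-1");
  }

  @Override
  protected void completed(Throwable exception) {
    System.out.println(exception == null ? "clone done" : "clone failed: " + exception);
  }
}

public class TemplateDemo {
  public static void main(String[] args) {
    new CloneFlow().process("mytable");
  }
}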
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java Thu Feb 21 03:38:05 2013
@@ -24,12 +24,16 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
@@ -55,6 +59,8 @@ public class DeleteTableHandler extends
if (cpHost != null) {
cpHost.preDeleteTableHandler(this.tableName);
}
+
+ // 1. Wait for regions in transition
AssignmentManager am = this.masterServices.getAssignmentManager();
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
@@ -71,21 +77,37 @@ public class DeleteTableHandler extends
waitTime + "ms) for region to leave region " +
region.getRegionNameAsString() + " in transitions");
}
- LOG.debug("Deleting region " + region.getRegionNameAsString() +
- " from META and FS");
- // Remove region from META
- MetaEditor.deleteRegion(this.server.getCatalogTracker(), region);
- // Delete region from FS
- this.masterServices.getMasterFileSystem().deleteRegion(region);
}
- // Delete table from FS
- this.masterServices.getMasterFileSystem().deleteTable(tableName);
- // Update table descriptor cache
- this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
- // If entry for this table in zk, and up in AssignmentManager, remove it.
+ // 2. Remove regions from META
+ LOG.debug("Deleting regions from META");
+ MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);
+
+ // 3. Move the table in /hbase/.tmp
+ MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
+ Path tempTableDir = mfs.moveTableToTemp(tableName);
+
+ try {
+ // 4. Delete regions from FS (temp directory)
+ FileSystem fs = mfs.getFileSystem();
+ for (HRegionInfo hri: regions) {
+ LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
+ tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
+ }
+
+ // 5. Delete table from FS (temp directory)
+ if (!fs.delete(tempTableDir, true)) {
+ LOG.error("Couldn't delete " + tempTableDir);
+ }
+ } finally {
+ // 6. Update table descriptor cache
+ this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
+
+ // 7. If entry for this table in zk, and up in AssignmentManager, remove it.
+ am.getZKTable().setDeletedTable(Bytes.toString(tableName));
+ }
- am.getZKTable().setDeletedTable(Bytes.toString(tableName));
if (cpHost != null) {
cpHost.postDeleteTableHandler(this.tableName);
}
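
Deletion now archives region files before removing the table directory, so a misfired delete stays recoverable until the cleaner chores discard the archived hfiles. A hedged sketch of the archiver call as used above; the paths are illustrative, while the four-argument signature matches the one in this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.backup.HFileArchiver;

public class ArchiveRegionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = new Path("/hbase");                       // illustrative
    Path tempTableDir = new Path(rootdir, ".tmp/mytable");   // table already moved to temp
    Path regionDir = new Path(tempTableDir, "e52bc3a7");     // one region's encoded name

    // Moves the region's hfiles under the archive directory instead of
    // deleting them; cleaner chores decide later whether they can go.
    HFileArchiver.archiveRegion(fs, rootdir, tempTableDir, regionDir);
  }
}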
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java Thu Feb 21 03:38:05 2013
@@ -184,6 +184,7 @@ public class DisableTableHandler extends
while (!server.isStopped() && remaining > 0) {
Thread.sleep(waitingTimeForEvents);
regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
+ LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions);
if (regions.isEmpty()) break;
remaining = timeout - (System.currentTimeMillis() - startTime);
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java Thu Feb 21 03:38:05 2013
@@ -163,12 +163,14 @@ public abstract class TableEventHandler
/**
+ * Gets a TableDescriptor from the masterServices. Can throw exceptions.
+ *
* @return Table descriptor for this table
* @throws TableExistsException
* @throws FileNotFoundException
* @throws IOException
*/
- HTableDescriptor getTableDescriptor()
+ public HTableDescriptor getTableDescriptor()
throws FileNotFoundException, IOException {
final String name = Bytes.toString(tableName);
HTableDescriptor htd =
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java Thu Feb 21 03:38:05 2013
@@ -970,7 +970,7 @@ public final class ProtobufUtil {
* Convert a delete KeyValue type to protocol buffer DeleteType.
*
* @param type
- * @return a DeleteType
+ * @return protocol buffer DeleteType
* @throws IOException
*/
public static DeleteType toDeleteType(
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Thu Feb 21 03:38:05 2013
@@ -97,6 +97,7 @@ import org.apache.hadoop.hbase.client.Re
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
@@ -113,6 +114,7 @@ import org.apache.hadoop.hbase.monitorin
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -120,6 +122,7 @@ import org.apache.hadoop.hbase.regionser
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -134,7 +137,6 @@ import org.apache.hadoop.util.StringUtil
import org.cliffc.high_scale_lib.Counter;
import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Closeables;
@@ -2450,8 +2452,71 @@ public class HRegion implements HeapSize
}
/**
- * Replaces any KV timestamps set to {@link HConstants#LATEST_TIMESTAMP}
- * with the provided current timestamp.
+ * Complete taking the snapshot on the region. Writes the region info and adds references to the
+ * working snapshot directory.
+ *
+ * TODO for api consistency, consider adding another version with no {@link ForeignExceptionSnare}
+ * arg. (In the future other cancellable HRegion methods could eventually add a
+ * {@link ForeignExceptionSnare}, or we could do something fancier).
+ *
+ * @param desc snapshot description object
+ * @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to
+ * bail out. This is allowed to be null and will just be ignored in that case.
+ * @throws IOException if there is an external or internal error causing the snapshot to fail
+ */
+ public void addRegionToSnapshot(SnapshotDescription desc,
+ ForeignExceptionSnare exnSnare) throws IOException {
+ // This should be "fast" since we don't rewrite store files but instead
+ // back up the store files by creating a reference
+ Path rootDir = FSUtils.getRootDir(this.rsServices.getConfiguration());
+ Path snapshotRegionDir = TakeSnapshotUtils.getRegionSnapshotDirectory(desc, rootDir,
+ regionInfo.getEncodedName());
+
+ // 1. dump region meta info into the snapshot directory
+ LOG.debug("Storing region-info for snapshot.");
+ checkRegioninfoOnFilesystem(snapshotRegionDir);
+
+ // 2. iterate through all the stores in the region
+ LOG.debug("Creating references for hfiles");
+
+ // This ensures that we have an atomic view of the directory as long as we have < ls limit
+ // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
+ // batches and may miss files being added/deleted. This could be more robust (iteratively
+ // checking to see if we have all the files until we are sure), but the limit is currently 1000
+ // files/batch, far more than the number of store files under a single column family.
+ for (Store store : stores.values()) {
+ // 2.1. build the snapshot reference directory for the store
+ Path dstStoreDir = TakeSnapshotUtils.getStoreSnapshotDirectory(snapshotRegionDir,
+ Bytes.toString(store.getFamily().getName()));
+ List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
+ }
+
+ // 2.2. iterate through all the store's files and create "references".
+ int sz = storeFiles.size();
+ for (int i = 0; i < sz; i++) {
+ if (exnSnare != null) {
+ exnSnare.rethrowException();
+ }
+ Path file = storeFiles.get(i).getPath();
+ // create "reference" to this store file. It is intentionally an empty file -- all
+ // necessary information is captured by its fs location and filename. This allows us to
+ // only figure out what needs to be done via a single nn operation (instead of having to
+ // open and read the files as well).
+ LOG.debug("Creating reference for file (" + (i+1) + "/" + sz + ") : " + file);
+ Path referenceFile = new Path(dstStoreDir, file.getName());
+ boolean success = fs.createNewFile(referenceFile);
+ if (!success) {
+ throw new IOException("Failed to create reference file:" + referenceFile);
+ }
+ }
+ }
+ }
+
+ /**
+ * Replaces any KV timestamps set to {@link HConstants#LATEST_TIMESTAMP} with the provided current
+ * timestamp.
*/
void updateKVTimestamps(
final Iterable<List<KeyValue>> keyLists, final byte[] now) {
@@ -4373,10 +4438,10 @@ public class HRegion implements HeapSize
}
// delete out the 'A' region
- HFileArchiver.archiveRegion(a.getBaseConf(), fs,
+ HFileArchiver.archiveRegion(fs,
FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(), a.getRegionDir());
// delete out the 'B' region
- HFileArchiver.archiveRegion(a.getBaseConf(), fs,
+ HFileArchiver.archiveRegion(fs,
FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(), b.getRegionDir());
LOG.info("merge completed. New region is " + dstRegion);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Thu Feb 21 03:38:05 2013
@@ -178,6 +178,7 @@ import org.apache.hadoop.hbase.regionser
import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler;
+import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
@@ -425,6 +426,9 @@ public class HRegionServer implements Cl
private RegionServerCoprocessorHost rsHost;
+ /** Handle all the snapshot requests to this server */
+ RegionServerSnapshotManager snapshotManager;
+
/**
* Starts a HRegionServer at the default location
*
@@ -763,6 +767,13 @@ public class HRegionServer implements Cl
} catch (KeeperException e) {
this.abort("Failed to retrieve Cluster ID",e);
}
+
+ // watch for snapshots
+ try {
+ this.snapshotManager = new RegionServerSnapshotManager(this);
+ } catch (KeeperException e) {
+ this.abort("Failed to reach zk cluster when creating snapshot handler.");
+ }
}
/**
@@ -849,6 +860,9 @@ public class HRegionServer implements Cl
}
}
+ // start the snapshot handler, since the server is ready to run
+ this.snapshotManager.start();
+
// We registered with the Master. Go into run mode.
long lastMsg = 0;
long oldRequestCount = -1;
@@ -930,6 +944,12 @@ public class HRegionServer implements Cl
this.healthCheckChore.interrupt();
}
+ try {
+ if (snapshotManager != null) snapshotManager.stop(this.abortRequested);
+ } catch (IOException e) {
+ LOG.warn("Failed to close snapshot handler cleanly", e);
+ }
+
if (this.killed) {
// Just skip out w/o closing regions. Used when testing.
} else if (abortRequested) {
@@ -946,6 +966,13 @@ public class HRegionServer implements Cl
// handlers are stuck waiting on meta or root.
if (this.catalogTracker != null) this.catalogTracker.stop();
+ // stop the snapshot handler, forcefully killing all running tasks
+ try {
+ if (snapshotManager != null) snapshotManager.stop(this.abortRequested || this.killed);
+ } catch (IOException e) {
+ LOG.warn("Failed to close snapshot handler cleanly", e);
+ }
+
// Closing the compactSplit thread before closing meta regions
if (!this.killed && containsMetaTableRegions()) {
if (!abortRequested || this.fsOk) {
@@ -3745,7 +3772,8 @@ public class HRegionServer implements Cl
*
* @param region
* @param mutate
- * @return the Result
+ * @return result to return to client if default operation should be
+ * bypassed as indicated by RegionObserver, null otherwise
* @throws IOException
*/
protected Result append(final HRegion region,
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Thu Feb 21 03:38:05 2013
@@ -46,24 +46,24 @@ import org.apache.hadoop.hbase.HDFSBlock
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.BlockType;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
-import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.BloomFilter;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.RawComparator;
@@ -183,13 +183,26 @@ public class StoreFile {
*/
private Map<byte[], byte[]> metadataMap;
- /*
- * Regex that will work for straight filenames and for reference names.
- * If reference, then the regex has more than just one group. Group 1 is
- * this files id. Group 2 the referenced region name, etc.
+ /**
+ * A non-capture group, for hfiles, so that this can be embedded.
+ * HFile names are uuids ([0-9a-f]+). Bulk loaded hfiles have a (_SeqId_[0-9]+_) suffix.
+ */
+ public static final String HFILE_NAME_REGEX = "[0-9a-f]+(?:_SeqId_[0-9]+_)?";
+
+ /** Regex that will work for hfiles */
+ private static final Pattern HFILE_NAME_PATTERN =
+ Pattern.compile("^(" + HFILE_NAME_REGEX + ")");
+
+ /**
+ * Regex that will work for straight reference names (<hfile>.<parentEncRegion>)
+ * and hfilelink reference names (<table>=<region>-<hfile>.<parentEncRegion>)
+ * If reference, then the regex has more than just one group.
+ * Group 1, the hfile/hfilelink pattern, is this file's id.
+ * Group 2 '(.+)' is the reference's parent region name.
*/
- private static final Pattern REF_NAME_PARSER =
- Pattern.compile("^([0-9a-f]+(?:_SeqId_[0-9]+_)?)(?:\\.(.+))?$");
+ private static final Pattern REF_NAME_PATTERN =
+ Pattern.compile(String.format("^(%s|%s)\\.(.+)$",
+ HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX));
// StoreFile.Reader
private volatile Reader reader;
@@ -239,7 +252,13 @@ public class StoreFile {
} else if (isReference(p)) {
this.reference = Reference.read(fs, p);
this.referencePath = getReferredToFile(this.path);
- LOG.debug("Store file " + p + " is a reference");
+ if (HFileLink.isHFileLink(this.referencePath)) {
+ this.link = new HFileLink(conf, this.referencePath);
+ }
+ LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
+ " reference to " + this.referencePath);
+ } else if (!isHFile(p)) {
+ throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
}
if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
@@ -257,7 +276,6 @@ public class StoreFile {
} else {
this.modificationTimeStamp = 0;
}
-
}
/**
@@ -286,7 +304,12 @@ public class StoreFile {
* @return <tt>true</tt> if this StoreFile is an HFileLink
*/
boolean isLink() {
- return this.link != null;
+ return this.link != null && this.reference == null;
+ }
+
+ private static boolean isHFile(final Path path) {
+ Matcher m = HFILE_NAME_PATTERN.matcher(path.getName());
+ return m.matches() && m.groupCount() > 0;
}
/**
@@ -294,22 +317,16 @@ public class StoreFile {
* @return True if the path has format of a HStoreFile reference.
*/
public static boolean isReference(final Path p) {
- return !p.getName().startsWith("_") &&
- isReference(p, REF_NAME_PARSER.matcher(p.getName()));
+ return isReference(p.getName());
}
/**
- * @param p Path to check.
- * @param m Matcher to use.
+ * @param name file name to check.
* @return True if the path has format of a HStoreFile reference.
*/
- public static boolean isReference(final Path p, final Matcher m) {
- if (m == null || !m.matches()) {
- LOG.warn("Failed match of store file name " + p.toString());
- throw new RuntimeException("Failed match of store file name " +
- p.toString());
- }
- return m.groupCount() > 1 && m.group(2) != null;
+ public static boolean isReference(final String name) {
+ Matcher m = REF_NAME_PATTERN.matcher(name);
+ return m.matches() && m.groupCount() > 1;
}
/*
@@ -317,20 +334,23 @@ public class StoreFile {
* hierarchy of <code>${hbase.rootdir}/tablename/regionname/familyname</code>.
* @param p Path to a Reference file.
* @return Calculated path to parent region file.
- * @throws IOException
+ * @throws IllegalArgumentException when path regex fails to match.
*/
public static Path getReferredToFile(final Path p) {
- Matcher m = REF_NAME_PARSER.matcher(p.getName());
+ Matcher m = REF_NAME_PATTERN.matcher(p.getName());
if (m == null || !m.matches()) {
LOG.warn("Failed match of store file name " + p.toString());
- throw new RuntimeException("Failed match of store file name " +
+ throw new IllegalArgumentException("Failed match of store file name " +
p.toString());
}
+
// Other region name is suffix on the passed Reference file name
String otherRegion = m.group(2);
// Tabledir is up two directories from where Reference was written.
Path tableDir = p.getParent().getParent().getParent();
String nameStrippedOfSuffix = m.group(1);
+ LOG.debug("reference '" + p + "' to region=" + otherRegion + " hfile=" + nameStrippedOfSuffix);
+
// Build up new path with the referenced region in place of our current
// region in the reference path. Also strip regionname suffix from name.
return new Path(new Path(new Path(tableDir, otherRegion),
@@ -437,16 +457,15 @@ public class StoreFile {
* If this estimate isn't good enough, we can improve it later.
* @param fs The FileSystem
* @param reference The reference
- * @param reference The referencePath
+ * @param status The reference FileStatus
* @return HDFS blocks distribution
*/
static private HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(
- FileSystem fs, Reference reference, Path referencePath) throws IOException {
- if ( referencePath == null) {
+ FileSystem fs, Reference reference, FileStatus status) throws IOException {
+ if (status == null) {
return null;
}
- FileStatus status = fs.getFileStatus(referencePath);
long start = 0;
long length = 0;
@@ -461,34 +480,18 @@ public class StoreFile {
}
/**
- * helper function to compute HDFS blocks distribution of a given file.
- * For reference file, it is an estimate
- * @param fs The FileSystem
- * @param p The path of the file
- * @return HDFS blocks distribution
- */
- static public HDFSBlocksDistribution computeHDFSBlockDistribution(
- FileSystem fs, Path p) throws IOException {
- if (isReference(p)) {
- Reference reference = Reference.read(fs, p);
- Path referencePath = getReferredToFile(p);
- return computeRefFileHDFSBlockDistribution(fs, reference, referencePath);
- } else {
- if (HFileLink.isHFileLink(p)) p = HFileLink.getReferencedPath(fs, p);
- FileStatus status = fs.getFileStatus(p);
- long length = status.getLen();
- return FSUtils.computeHDFSBlocksDistribution(fs, status, 0, length);
- }
- }
-
-
- /**
* compute HDFS block distribution, for reference file, it is an estimate
*/
private void computeHDFSBlockDistribution() throws IOException {
if (isReference()) {
+ FileStatus status;
+ if (this.link != null) {
+ status = this.link.getFileStatus(fs);
+ } else {
+ status = fs.getFileStatus(this.referencePath);
+ }
this.hdfsBlocksDistribution = computeRefFileHDFSBlockDistribution(
- this.fs, this.reference, this.referencePath);
+ this.fs, this.reference, status);
} else {
FileStatus status;
if (isLink()) {
@@ -513,13 +516,17 @@ public class StoreFile {
throw new IllegalAccessError("Already open");
}
if (isReference()) {
- this.reader = new HalfStoreFileReader(this.fs, this.referencePath,
- this.cacheConf, this.reference,
- dataBlockEncoder.getEncodingInCache());
+ if (this.link != null) {
+ this.reader = new HalfStoreFileReader(this.fs, this.referencePath, this.link,
+ this.cacheConf, this.reference, dataBlockEncoder.getEncodingInCache());
+ } else {
+ this.reader = new HalfStoreFileReader(this.fs, this.referencePath,
+ this.cacheConf, this.reference, dataBlockEncoder.getEncodingInCache());
+ }
} else if (isLink()) {
long size = link.getFileStatus(fs).getLen();
this.reader = new Reader(this.fs, this.path, link, size, this.cacheConf,
- dataBlockEncoder.getEncodingInCache(), true);
+ dataBlockEncoder.getEncodingInCache(), true);
} else {
this.reader = new Reader(this.fs, this.path, this.cacheConf,
dataBlockEncoder.getEncodingInCache());
@@ -901,6 +908,8 @@ public class StoreFile {
public static boolean validateStoreFileName(String fileName) {
if (HFileLink.isHFileLink(fileName))
return true;
+ if (isReference(fileName))
+ return true;
return !fileName.contains("-");
}
@@ -926,7 +935,7 @@ public class StoreFile {
Reference r =
top? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
// Add the referred-to regions name as a dot separated suffix.
- // See REF_NAME_PARSER regex above. The referred-to regions name is
+ // See REF_NAME_REGEX regex above. The referred-to regions name is
// up in the path of the passed in <code>f</code> -- parentdir is family,
// then the directory above is the region name.
String parentRegionName = f.getPath().getParent().getParent().getName();
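
Splitting the old REF_NAME_PARSER into HFILE_NAME_PATTERN and REF_NAME_PATTERN makes plain hfile names, bulk-load names, and reference names (including references to hfilelinks) explicit. The patterns can be exercised standalone; note that LINK_NAME_REGEX below is only an approximation of HFileLink.LINK_NAME_REGEX, whose exact definition is not part of this hunk:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class StoreFileNameDemo {
  // Copied from the diff above.
  static final String HFILE_NAME_REGEX = "[0-9a-f]+(?:_SeqId_[0-9]+_)?";
  // Assumption: <table>=<region>-<hfile>; the real constant lives in HFileLink.
  static final String LINK_NAME_REGEX =
      "[a-zA-Z_0-9][a-zA-Z_0-9.-]*=[0-9a-f]+-" + HFILE_NAME_REGEX;

  static final Pattern HFILE_NAME_PATTERN = Pattern.compile("^(" + HFILE_NAME_REGEX + ")");
  static final Pattern REF_NAME_PATTERN = Pattern.compile(
      String.format("^(%s|%s)\\.(.+)$", HFILE_NAME_REGEX, LINK_NAME_REGEX));

  public static void main(String[] args) {
    // Plain uuid hfile and a bulk-loaded hfile both pass the hfile check.
    System.out.println(HFILE_NAME_PATTERN.matcher("0123abcd").matches());            // true
    System.out.println(HFILE_NAME_PATTERN.matcher("0123abcd_SeqId_42_").matches());  // true

    // A reference: group 1 is the hfile, group 2 the parent encoded region.
    Matcher ref = REF_NAME_PATTERN.matcher("0123abcd.aabbccdd");
    if (ref.matches()) {
      System.out.println("hfile=" + ref.group(1) + " parent=" + ref.group(2));
    }

    // A reference whose group 1 is itself an hfilelink name.
    Matcher link = REF_NAME_PATTERN.matcher("mytable=aabbccdd-0123abcd.eeff0011");
    if (link.matches()) {
      System.out.println("link=" + link.group(1) + " parent=" + link.group(2));
    }
  }
}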
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Thu Feb 21 03:38:05 2013
@@ -59,7 +59,7 @@ public interface HLog {
*/
static final String RECOVERED_EDITS_DIR = "recovered.edits";
static final Pattern EDITFILES_NAME_PATTERN = Pattern.compile("-?[0-9]+");
- static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";
+ public static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";
public interface Reader {
void init(FileSystem fs, Path path, Configuration c) throws IOException;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java Thu Feb 21 03:38:05 2013
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.master.Re
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -708,6 +709,55 @@ public class AccessController extends Ba
AccessControlLists.init(ctx.getEnvironment().getMasterServices());
}
+ @Override
+ public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ requirePermission("snapshot", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ }
+
+ @Override
+ public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ requirePermission("clone", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ }
+
+ @Override
+ public void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ requirePermission("restore", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ }
+
+ @Override
+ public void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException {
+ requirePermission("deleteSnapshot", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException {
+ }
/* ---- RegionObserver implementation ---- */
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java Thu Feb 21 03:38:05 2013
@@ -607,7 +607,25 @@ public class FSTableDescriptors implemen
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor, boolean forceCreation)
throws IOException {
- FileStatus status = getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString());
+ Path tabledir = FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString());
+ return createTableDescriptorForTableDirectory(fs, tabledir, htableDescriptor, forceCreation);
+ }
+
+ /**
+ * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
+ * a new table or snapshot a table.
+ * @param fs filesystem where the descriptor should be written
+ * @param tabledir directory under which we should write the file
+ * @param htableDescriptor description of the table to write
+ * @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is present it will
+ * be overwritten
+ * @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
+ * already exists and we weren't forcing the descriptor creation.
+ * @throws IOException if a filesystem error occurs
+ */
+ public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tabledir,
+ HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException {
+ FileStatus status = getTableInfoPath(fs, tabledir);
if (status != null) {
LOG.info("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
@@ -617,8 +635,7 @@ public class FSTableDescriptors implemen
}
}
}
- Path p = writeTableDescriptor(fs, htableDescriptor,
- FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()), status);
+ Path p = writeTableDescriptor(fs, htableDescriptor, tabledir, status);
return p != null;
}
}
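
The new overload writes the .tableinfo file into whatever table directory it is handed instead of deriving one from the root dir, which is what lets CreateTableHandler build a table under the temp directory first. A hedged usage sketch with illustrative table, family, and path names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class TempDescriptorExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    HTableDescriptor htd = new HTableDescriptor("mytable");
    htd.addFamily(new HColumnDescriptor("cf"));

    // Write the descriptor under the temp table dir, not the final location;
    // forceCreation=false leaves any existing descriptor alone.
    Path tempTableDir = new Path("/hbase/.tmp/mytable");
    boolean created = FSTableDescriptors.createTableDescriptorForTableDirectory(
        fs, tempTableDir, htd, false);
    System.out.println("descriptor written: " + created);
  }
}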
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Thu Feb 21 03:38:05 2013
@@ -947,6 +947,27 @@ public abstract class FSUtils {
}
/**
+ * A {@link PathFilter} that returns only regular files.
+ */
+ static class FileFilter implements PathFilter {
+ private final FileSystem fs;
+
+ public FileFilter(final FileSystem fs) {
+ this.fs = fs;
+ }
+
+ @Override
+ public boolean accept(Path p) {
+ try {
+ return fs.isFile(p);
+ } catch (IOException e) {
+ LOG.debug("unable to verify if path=" + p + " is a regular file", e);
+ return false;
+ }
+ }
+ }
+
+ /**
* A {@link PathFilter} that returns directories.
*/
public static class DirFilter implements PathFilter {
@@ -956,13 +977,14 @@ public abstract class FSUtils {
this.fs = fs;
}
+ @Override
public boolean accept(Path p) {
boolean isValid = false;
try {
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.toString())) {
isValid = false;
} else {
- isValid = this.fs.getFileStatus(p).isDir();
+ isValid = fs.getFileStatus(p).isDir();
}
} catch (IOException e) {
LOG.warn("An error occurred while verifying if [" + p.toString() +
@@ -1311,19 +1333,6 @@ public abstract class FSUtils {
}
/**
- * Log the current state of the filesystem from a certain root directory
- * @param fs filesystem to investigate
- * @param root root file/directory to start logging from
- * @param LOG log to output information
- * @throws IOException if an unexpected exception occurs
- */
- public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
- throws IOException {
- LOG.debug("Current file system:");
- logFSTree(LOG, fs, root, "|-");
- }
-
- /**
* Throw an exception if an action is not permitted by a user on a file.
*
* @param ugi
@@ -1360,6 +1369,19 @@ public abstract class FSUtils {
}
/**
+ * Log the current state of the filesystem from a certain root directory
+ * @param fs filesystem to investigate
+ * @param root root file/directory to start logging from
+ * @param LOG log to output information
+ * @throws IOException if an unexpected exception occurs
+ */
+ public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
+ throws IOException {
+ LOG.debug("Current file system:");
+ logFSTree(LOG, fs, root, "|-");
+ }
+
+ /**
* Recursive helper to log the state of the FS
*
* @see #logFileSystemState(FileSystem, Path, Log)
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Thu Feb 21 03:38:05 2013
@@ -99,6 +99,24 @@ public class HFileArchiveUtil {
}
/**
+ * Get the archive directory for a given region under the specified table
+ * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+ * the archive path)
+ * @param tabledir the original table directory. Cannot be null.
+ * @param regiondir the path to the region directory. Cannot be null.
+ * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
+ * should not be archived
+ */
+ public static Path getRegionArchiveDir(Path rootdir, Path tabledir, Path regiondir) {
+ // get the archive directory for a table
+ Path archiveDir = getTableArchivePath(rootdir, tabledir.getName());
+
+ // then add on the region path under the archive
+ String encodedRegionName = regiondir.getName();
+ return HRegion.getRegionDir(archiveDir, encodedRegionName);
+ }
+
+ /**
* Get the path to the table archive directory based on the configured archive directory.
* <p>
* Get the path to the table's archive directory.
@@ -109,7 +127,22 @@ public class HFileArchiveUtil {
*/
public static Path getTableArchivePath(Path tabledir) {
Path root = tabledir.getParent();
- return new Path(new Path(root,HConstants.HFILE_ARCHIVE_DIRECTORY), tabledir.getName());
+ return getTableArchivePath(root, tabledir.getName());
+ }
+
+ /**
+ * Get the path to the table's archive directory, based on the configured archive directory.
+ * <p>
+ * Generally of the form: /hbase/.archive/[tablename]
+ * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+ * the archive path)
+ * @param tableName Name of the table to be archived. Cannot be null.
+ * @return {@link Path} to the archive directory for the table
+ */
+ public static Path getTableArchivePath(final Path rootdir, final String tableName) {
+ return new Path(getArchivePath(rootdir), tableName);
}
/**
@@ -133,6 +166,16 @@ public class HFileArchiveUtil {
* @throws IOException if an unexpected error occurs
*/
public static Path getArchivePath(Configuration conf) throws IOException {
- return new Path(FSUtils.getRootDir(conf), HConstants.HFILE_ARCHIVE_DIRECTORY);
+ return getArchivePath(FSUtils.getRootDir(conf));
+ }
+
+ /**
+ * Get the full path to the archive directory on the configured {@link FileSystem}
+ * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+ * the archive path)
+ * @return the full {@link Path} to the archive directory, as defined by the configuration
+ */
+ private static Path getArchivePath(final Path rootdir) {
+ return new Path(rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY);
}
}
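
Together, the new overloads make the archive layout derivable from plain Paths, with no Configuration needed. A minimal sketch (the root, table, and encoded region names are made up):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    public class ArchivePathDemo {
      public static void main(String[] args) {
        Path rootdir = new Path("/hbase");                 // hypothetical hbase.rootdir
        Path tabledir = new Path(rootdir, "usertable");    // hypothetical table directory
        Path regiondir = new Path(tabledir, "abc123");     // hypothetical encoded region name
        // Expected form per the javadoc above: /hbase/.archive/usertable/abc123
        System.out.println(HFileArchiveUtil.getRegionArchiveDir(rootdir, tabledir, regiondir));
      }
    }
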
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java Thu Feb 21 03:38:05 2013
@@ -235,22 +235,25 @@ public class JVMClusterUtil {
}
}
// Do active after.
- if (activeMaster != null) activeMaster.master.shutdown();
+ if (activeMaster != null)
+ activeMaster.master.shutdown();
+
}
- boolean noWait = false;
+ boolean wasInterrupted = false;
final long maxTime = System.currentTimeMillis() + 120 * 1000;
if (regionservers != null) {
+ // first try nicely.
for (RegionServerThread t : regionservers) {
t.getRegionServer().stop("Shutdown requested");
}
for (RegionServerThread t : regionservers) {
- if (t.isAlive() && !noWait && System.currentTimeMillis() < maxTime) {
+ if (t.isAlive() && !wasInterrupted && System.currentTimeMillis() < maxTime) {
try {
t.join(maxTime);
} catch (InterruptedException e) {
LOG.info("Got InterruptedException on shutdown - " +
"not waiting anymore on region server ends", e);
- noWait = true; // someone wants us to speed up.
+ wasInterrupted = true; // someone wants us to speed up.
}
}
}
@@ -260,14 +263,16 @@ public class JVMClusterUtil {
for (RegionServerThread t : regionservers) {
if (t.isAlive()) {
try {
+ LOG.warn("RegionServerThreads remaining, give one more chance before interrupting");
t.join(10);
} catch (InterruptedException e) {
- noWait = true;
+ wasInterrupted = true;
}
}
}
for (RegionServerThread t : regionservers) {
if (t.isAlive()) {
+ LOG.warn("RegionServerThreads taking too long to stop, interrupting");
t.interrupt();
}
}
@@ -278,7 +283,7 @@ public class JVMClusterUtil {
if (masters != null) {
for (JVMClusterUtil.MasterThread t : masters) {
- while (t.master.isAlive() && !noWait) {
+ while (t.master.isAlive() && !wasInterrupted) {
try {
// The below has been replaced to debug sometime hangs on end of
// tests.
@@ -287,7 +292,7 @@ public class JVMClusterUtil {
} catch(InterruptedException e) {
LOG.info("Got InterruptedException on shutdown - " +
"not waiting anymore on master ends", e);
- noWait = true;
+ wasInterrupted = true;
}
}
}
@@ -295,9 +300,9 @@ public class JVMClusterUtil {
LOG.info("Shutdown of " +
((masters != null) ? masters.size() : "0") + " master(s) and " +
((regionservers != null) ? regionservers.size() : "0") +
- " regionserver(s) " + (noWait ? "interrupted" : "complete"));
+ " regionserver(s) " + (wasInterrupted ? "interrupted" : "complete"));
- if (!noWait){
+ if (wasInterrupted){
Thread.currentThread().interrupt();
}
}
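
Besides the rename, this hunk fixes an inverted condition: the old code re-interrupted the current thread only when no interrupt had occurred ("!noWait"), whereas the intent is to restore the interrupt status that was swallowed during the joins. The underlying idiom, sketched minimally:

    // Minimal sketch of the swallow-then-restore interrupt idiom used above.
    public class InterruptIdiom {
      static void joinQuietly(Thread t, long millis) {
        boolean wasInterrupted = false;
        try {
          t.join(millis);
        } catch (InterruptedException e) {
          wasInterrupted = true; // remember it, but finish shutting down
        }
        if (wasInterrupted) {
          // Re-assert the interrupt so callers still observe it.
          Thread.currentThread().interrupt();
        }
      }
    }
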
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Thu Feb 21 03:38:05 2013
@@ -26,18 +26,15 @@ import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
-import java.util.Properties;
-import java.util.HashMap;
import java.util.Map;
+import java.util.Properties;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
-
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -52,17 +49,19 @@ import org.apache.hadoop.hbase.util.Thre
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.SetData;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.Op;
import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.apache.zookeeper.Op;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
-import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.apache.zookeeper.proto.CreateRequest;
import org.apache.zookeeper.proto.DeleteRequest;
import org.apache.zookeeper.proto.SetDataRequest;
@@ -862,6 +861,10 @@ public class ZKUtil {
/**
* Set data into node creating node if it doesn't yet exist.
* Does not set watch.
+ *
+ * WARNING: this is not atomic -- it is possible to get a 0-byte data value in the znode before
+ * data is written
+ *
* @param zkw zk reference
* @param znode path of node
* @param data data to set for node
@@ -1080,7 +1083,7 @@ public class ZKUtil {
}
/**
- * Creates the specified node, if the node does not exist. Does not set a
+ * Creates the specified node, iff the node does not exist. Does not set a
* watch and fails silently if the node already exists.
*
* The node created is persistent and open access.
@@ -1091,10 +1094,27 @@ public class ZKUtil {
*/
public static void createAndFailSilent(ZooKeeperWatcher zkw,
String znode) throws KeeperException {
- createAndFailSilent(zkw,
- (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, new byte[0]));
+ createAndFailSilent(zkw, znode, new byte[0]);
}
+ /**
+ * Creates the specified node containing the specified data, iff the node does not exist. Does
+ * not set a watch and fails silently if the node already exists.
+ *
+ * The node created is persistent and open access.
+ *
+ * @param zkw zk reference
+ * @param znode path of node
+ * @param data byte array of data to store in the znode
+ * @throws KeeperException if unexpected zookeeper exception
+ */
+ public static void createAndFailSilent(ZooKeeperWatcher zkw,
+ String znode, byte[] data)
+ throws KeeperException {
+ createAndFailSilent(zkw,
+ (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, data));
+ }
+
private static void createAndFailSilent(ZooKeeperWatcher zkw, CreateAndFailSilent cafs)
throws KeeperException {
CreateRequest create = (CreateRequest)toZooKeeperOp(zkw, cafs).toRequestRecord();
@@ -1133,11 +1153,29 @@ public class ZKUtil {
*/
public static void createWithParents(ZooKeeperWatcher zkw, String znode)
throws KeeperException {
+ createWithParents(zkw, znode, new byte[0]);
+ }
+
+ /**
+ * Creates the specified node and all parent nodes required for it to exist. The creation of
+ * parent znodes is not atomic with the leaf znode creation, but the data is written atomically
+ * when the leaf node is created.
+ *
+ * No watches are set and no errors are thrown if the node already exists.
+ *
+ * The nodes created are persistent and open access.
+ *
+ * @param zkw zk reference
+ * @param znode path of node
+ * @param data data to write when the leaf znode is created
+ * @throws KeeperException if unexpected zookeeper exception
+ */
+ public static void createWithParents(ZooKeeperWatcher zkw, String znode, byte[] data)
+ throws KeeperException {
try {
if(znode == null) {
return;
}
- zkw.getRecoverableZooKeeper().create(znode, new byte[0], createACL(zkw, znode),
+ zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode),
CreateMode.PERSISTENT);
} catch(KeeperException.NodeExistsException nee) {
return;
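
The two overloads above accept an initial payload, so a znode need not pass through an empty state on creation (contrast the non-atomicity warning on the setData path earlier in this file). A minimal usage sketch; the znode paths are made up, and zkw is assumed to be an already-constructed ZooKeeperWatcher:

    byte[] payload = org.apache.hadoop.hbase.util.Bytes.toBytes("enabled");

    // No-op if /hbase/demo already exists; otherwise created with the payload.
    ZKUtil.createAndFailSilent(zkw, "/hbase/demo", payload);

    // Creates any missing parents first; the payload is written atomically
    // with the creation of the leaf znode only.
    ZKUtil.createWithParents(zkw, "/hbase/demo/child", payload);
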
@@ -1694,4 +1732,37 @@ public class ZKUtil {
ke.initCause(e);
return ke;
}
+
+ /**
+ * Recursively print the current state of ZK (non-transactional)
+ * @param zkw zk reference
+ * @param root name of the root directory in zk to print; any KeeperException is rethrown as a RuntimeException
+ */
+ public static void logZKTree(ZooKeeperWatcher zkw, String root) {
+ if (!LOG.isDebugEnabled()) return;
+ LOG.debug("Current zk system:");
+ String prefix = "|-";
+ LOG.debug(prefix + root);
+ try {
+ logZKTree(zkw, root, prefix);
+ } catch (KeeperException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Helper method to print the current state of the ZK tree.
+ * @see #logZKTree(ZooKeeperWatcher, String)
+ * @throws KeeperException if an unexpected exception occurs
+ */
+ protected static void logZKTree(ZooKeeperWatcher zkw, String root, String prefix) throws KeeperException {
+ List<String> children = ZKUtil.listChildrenNoWatch(zkw, root);
+ if (children == null) return;
+ for (String child : children) {
+ LOG.debug(prefix + child);
+ String node = ZKUtil.joinZNode(root.equals("/") ? "" : root, child);
+ logZKTree(zkw, node, prefix + "---");
+ }
+ }
+
}
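
For reference, logZKTree emits one DEBUG line per znode, with the prefix growing by three dashes per level of recursion. A minimal sketch, assuming an existing ZooKeeperWatcher zkw and a hypothetical tree /hbase -> {master, rs -> {rs1}}:

    // Output (roughly):
    //   Current zk system:
    //   |-/hbase
    //   |-master
    //   |-rs
    //   |----rs1
    ZKUtil.logZKTree(zkw, "/hbase");
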
Modified: hbase/trunk/hbase-server/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/resources/hbase-default.xml?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/resources/hbase-default.xml (original)
+++ hbase/trunk/hbase-server/src/main/resources/hbase-default.xml Thu Feb 21 03:38:05 2013
@@ -952,6 +952,13 @@
</description>
</property>
<property>
+ <name>hbase.snapshot.enabled</name>
+ <value>true</value>
+ <description>
+ Set to true to allow snapshots to be taken / restored / cloned.
+ </description>
+ </property>
+ <property>
<name>hbase.rest.threads.max</name>
<value>100</value>
<description>
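
The new property defaults to on; a minimal sketch of overriding it programmatically, e.g. to disable the snapshot feature in a test configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class DisableSnapshots {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Overrides the hbase-default.xml value introduced above.
        conf.setBoolean("hbase.snapshot.enabled", false);
        System.out.println(conf.getBoolean("hbase.snapshot.enabled", true));
      }
    }
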
Modified: hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb (original)
+++ hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb Thu Feb 21 03:38:05 2013
@@ -655,6 +655,42 @@ module Hbase
end
end
+ #----------------------------------------------------------------------------------------------
+ # Take a snapshot of specified table
+ def snapshot(table, snapshot_name)
+ @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+ end
+
+ #----------------------------------------------------------------------------------------------
+ # Restore specified snapshot
+ def restore_snapshot(snapshot_name)
+ @admin.restoreSnapshot(snapshot_name.to_java_bytes)
+ end
+
+ #----------------------------------------------------------------------------------------------
+ # Create a new table by cloning the snapshot content
+ def clone_snapshot(snapshot_name, table)
+ @admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+ end
+
+ #----------------------------------------------------------------------------------------------
+ # Rename specified snapshot
+ def rename_snapshot(old_snapshot_name, new_snapshot_name)
+ @admin.renameSnapshot(old_snapshot_name.to_java_bytes, new_snapshot_name.to_java_bytes)
+ end
+
+ #----------------------------------------------------------------------------------------------
+ # Delete specified snapshot
+ def delete_snapshot(snapshot_name)
+ @admin.deleteSnapshot(snapshot_name.to_java_bytes)
+ end
+
+ #----------------------------------------------------------------------------------------------
+ # Returns a list of snapshots
+ def list_snapshot
+ @admin.getCompletedSnapshots
+ end
+
# Apply config specific to a table/column to its descriptor
def set_descriptor_config(descriptor, config)
raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.kind_of?(Hash)
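
The Ruby helpers above are thin wrappers over HBaseAdmin; a minimal Java sketch of the same call sequence (the snapshot and table names are made up, restoreSnapshot additionally requires the target table to be disabled first, and error handling is omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SnapshotAdminDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          admin.snapshot(Bytes.toBytes("snap1"), Bytes.toBytes("mytable"));
          admin.cloneSnapshot(Bytes.toBytes("snap1"), Bytes.toBytes("mytable_copy"));
          System.out.println(admin.getCompletedSnapshots());
          admin.deleteSnapshot(Bytes.toBytes("snap1"));
        } finally {
          admin.close();
        }
      }
    }
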
Modified: hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb (original)
+++ hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb Thu Feb 21 03:38:05 2013
@@ -409,14 +409,14 @@ EOF
# Generally used for admin functions which just have one name and take the table name
def self.add_admin_utils(*args)
args.each do |method|
- define_method method do
- @shell.command(method, @name)
+ define_method method do |*method_args|
+ @shell.command(method, @name, *method_args)
end
end
end
#Add the following admin utilities to the table
- add_admin_utils :enable, :disable, :flush, :drop, :describe
+ add_admin_utils :enable, :disable, :flush, :drop, :describe, :snapshot
#----------------------------
#give the general help for the table
Modified: hbase/trunk/hbase-server/src/main/ruby/shell.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/ruby/shell.rb?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/ruby/shell.rb (original)
+++ hbase/trunk/hbase-server/src/main/ruby/shell.rb Thu Feb 21 03:38:05 2013
@@ -308,6 +308,19 @@ Shell.load_command_group(
)
Shell.load_command_group(
+ 'snapshot',
+ :full_name => 'CLUSTER SNAPSHOT TOOLS',
+ :commands => %w[
+ snapshot
+ clone_snapshot
+ restore_snapshot
+ rename_snapshot
+ delete_snapshot
+ list_snapshots
+ ]
+)
+
+Shell.load_command_group(
'security',
:full_name => 'SECURITY TOOLS',
:comment => "NOTE: Above commands are only applicable if running with the AccessController coprocessor",
Modified: hbase/trunk/hbase-server/src/main/ruby/shell/commands/list.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/ruby/shell/commands/list.rb?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/ruby/shell/commands/list.rb (original)
+++ hbase/trunk/hbase-server/src/main/ruby/shell/commands/list.rb Thu Feb 21 03:38:05 2013
@@ -40,6 +40,7 @@ EOF
end
formatter.footer(now, list.size)
+ return list
end
end
end
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Thu Feb 21 03:38:05 2013
@@ -1255,6 +1255,20 @@ public class HBaseTestingUtility extends
return count;
}
+ public int countRows(final HTable table, final byte[]... families) throws IOException {
+ Scan scan = new Scan();
+ for (byte[] family: families) {
+ scan.addFamily(family);
+ }
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (@SuppressWarnings("unused") Result res : results) {
+ count++;
+ }
+ results.close();
+ return count;
+ }
+
/**
* Return an md5 digest of the entire contents of a table.
*/
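
The new overload restricts the scan to the given families before counting. A minimal usage sketch from a test (the utility, table, and family names are illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CountRowsDemo {
      // Counts only rows that have at least one cell in family "cf".
      static int countCfRows(HBaseTestingUtility util, HTable table) throws Exception {
        return util.countRows(table, Bytes.toBytes("cf"));
      }
    }
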
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java Thu Feb 21 03:38:05 2013
@@ -20,11 +20,16 @@ package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.IOException;
+import java.util.regex.Pattern;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
+import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -33,6 +38,8 @@ import org.junit.experimental.categories
*/
@Category(SmallTests.class)
public class TestHTableDescriptor {
+ final static Log LOG = LogFactory.getLog(TestHTableDescriptor.class);
+
@Test
public void testPb() throws DeserializationException, IOException {
HTableDescriptor htd = HTableDescriptor.META_TABLEDESC;
@@ -117,7 +124,45 @@ public class TestHTableDescriptor {
assertEquals(null, desc.getValue(key));
}
- /**
+ String legalTableNames[] = { "foo", "with-dash_under.dot", "_under_start_ok", };
+ String illegalTableNames[] = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok" };
+
+ @Test
+ public void testLegalHTableNames() {
+ for (String tn : legalTableNames) {
+ HTableDescriptor.isLegalTableName(Bytes.toBytes(tn));
+ }
+ }
+
+ @Test
+ public void testIllegalHTableNames() {
+ for (String tn : illegalTableNames) {
+ try {
+ HTableDescriptor.isLegalTableName(Bytes.toBytes(tn));
+ fail("invalid tablename " + tn + " should have failed");
+ } catch (Exception e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void testLegalHTableNamesRegex() {
+ for (String tn : legalTableNames) {
+ LOG.info("Testing: '" + tn + "'");
+ assertTrue(Pattern.matches(HTableDescriptor.VALID_USER_TABLE_REGEX, tn));
+ }
+ }
+
+ @Test
+ public void testIllegalHTableNamesRegex() {
+ for (String tn : illegalTableNames) {
+ LOG.info("Testing: '" + tn + "'");
+ assertFalse(Pattern.matches(HTableDescriptor.VALID_USER_TABLE_REGEX, tn));
+ }
+ }
+
+ /**
* Test default value handling for maxFileSize
*/
@Test
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java?rev=1448506&r1=1448505&r2=1448506&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java Thu Feb 21 03:38:05 2013
@@ -37,9 +37,7 @@ import org.apache.hadoop.hbase.HBaseTest
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -360,7 +358,7 @@ public class TestHFileArchiving {
try {
// Try to archive the file
- HFileArchiver.archiveRegion(conf, fs, rootDir,
+ HFileArchiver.archiveRegion(fs, rootDir,
sourceRegionDir.getParent(), sourceRegionDir);
// The archiver succeeded, the file is no longer in the original location