You are viewing a plain text version of this content. The canonical link for it is http://svn.apache.org/r1452257.
Posted to commits@hbase.apache.org by mb...@apache.org on 2013/03/04 12:24:53 UTC
svn commit: r1452257 [1/14] - in /hbase/branches/0.94:
security/src/main/java/org/apache/hadoop/hbase/security/access/
security/src/test/java/org/apache/hadoop/hbase/security/access/
src/main/jamon/org/apache/hadoop/hbase/tmpl/master/ src/main/java/org...
Author: mbertozzi
Date: Mon Mar 4 11:24:50 2013
New Revision: 1452257
URL: http://svn.apache.org/r1452257
Log:
HBASE-7360 Backport Snapshots to 0.94
Added:
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/SubprocedureFactory.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/protobuf/
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/protobuf/generated/
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ErrorHandlingProtos.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/CopyRecoveredEditsTask.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/HSnapshotDescription.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
hbase/branches/0.94/src/main/protobuf/
hbase/branches/0.94/src/main/protobuf/ErrorHandling.proto
hbase/branches/0.94/src/main/protobuf/hbase.proto
hbase/branches/0.94/src/main/ruby/shell/commands/clone_snapshot.rb
hbase/branches/0.94/src/main/ruby/shell/commands/delete_snapshot.rb
hbase/branches/0.94/src/main/ruby/shell/commands/list_snapshots.rb
hbase/branches/0.94/src/main/ruby/shell/commands/restore_snapshot.rb
hbase/branches/0.94/src/main/ruby/shell/commands/snapshot.rb
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotLogCleaner.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/procedure/
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/snapshot/TestWALReferenceTask.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java
Modified:
hbase/branches/0.94/security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
hbase/branches/0.94/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
hbase/branches/0.94/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/Chore.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
hbase/branches/0.94/src/main/ruby/hbase/admin.rb
hbase/branches/0.94/src/main/ruby/shell.rb
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java
Modified: hbase/branches/0.94/security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java (original)
+++ hbase/branches/0.94/security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java Mon Mar 4 11:24:50 2013
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.filter.Wr
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.ipc.ProtocolSignature;
import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -730,6 +731,55 @@ public class AccessController extends Ba
AccessControlLists.init(ctx.getEnvironment().getMasterServices());
}
+ @Override
+ public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ requirePermission("snapshot", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ }
+
+ @Override
+ public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ requirePermission("cloneSnapshot", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ }
+
+ @Override
+ public void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ requirePermission("restoreSnapshot", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ }
+
+ @Override
+ public void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException {
+ requirePermission("deleteSnapshot", Permission.Action.ADMIN);
+ }
+
+ @Override
+ public void postDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException {
+ }
/* ---- RegionObserver implementation ---- */
Modified: hbase/branches/0.94/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java (original)
+++ hbase/branches/0.94/security/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java Mon Mar 4 11:24:50 2013
@@ -113,6 +113,11 @@ public class TestAccessController {
public static void setupBeforeClass() throws Exception {
// setup configuration
conf = TEST_UTIL.getConfiguration();
+ conf.set("hbase.master.hfilecleaner.plugins",
+ "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner," +
+ "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner");
+ conf.set("hbase.master.logcleaner.plugins",
+ "org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner");
SecureTestUtil.enableSecurity(conf);
TEST_UTIL.startMiniCluster();
@@ -1721,4 +1726,51 @@ public class TestAccessController {
verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
+
+ @Test
+ public void testSnapshot() throws Exception {
+ PrivilegedExceptionAction snapshotAction = new PrivilegedExceptionAction() {
+ public Object run() throws Exception {
+ ACCESS_CONTROLLER.preSnapshot(ObserverContext.createAndPrepare(CP_ENV, null),
+ null, null);
+ return null;
+ }
+ };
+
+ PrivilegedExceptionAction deleteAction = new PrivilegedExceptionAction() {
+ public Object run() throws Exception {
+ ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContext.createAndPrepare(CP_ENV, null),
+ null);
+ return null;
+ }
+ };
+
+ PrivilegedExceptionAction restoreAction = new PrivilegedExceptionAction() {
+ public Object run() throws Exception {
+ ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContext.createAndPrepare(CP_ENV, null),
+ null, null);
+ return null;
+ }
+ };
+
+ PrivilegedExceptionAction cloneAction = new PrivilegedExceptionAction() {
+ public Object run() throws Exception {
+ ACCESS_CONTROLLER.preCloneSnapshot(ObserverContext.createAndPrepare(CP_ENV, null),
+ null, null);
+ return null;
+ }
+ };
+
+ verifyAllowed(snapshotAction, SUPERUSER, USER_ADMIN);
+ verifyDenied(snapshotAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+
+ verifyAllowed(cloneAction, SUPERUSER, USER_ADMIN);
+ verifyDenied(deleteAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+
+ verifyAllowed(restoreAction, SUPERUSER, USER_ADMIN);
+ verifyDenied(restoreAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+
+ verifyAllowed(deleteAction, SUPERUSER, USER_ADMIN);
+ verifyDenied(cloneAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+ }
}
Modified: hbase/branches/0.94/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon (original)
+++ hbase/branches/0.94/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon Mon Mar 4 11:24:50 2013
@@ -43,6 +43,7 @@ org.apache.hadoop.hbase.client.HBaseAdmi
org.apache.hadoop.hbase.client.HConnectionManager;
org.apache.hadoop.hbase.HTableDescriptor;
org.apache.hadoop.hbase.HBaseConfiguration;
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
</%import>
<%if format.equals("json") %>
<& ../common/TaskMonitorTmpl; filter = filter; format = "json" &>
@@ -123,6 +124,7 @@ org.apache.hadoop.hbase.HBaseConfigurati
<%if (metaLocation != null) %>
<& userTables &>
</%if>
+<& userSnapshots &>
<%if (servers != null) %>
<& regionServers &>
</%if>
@@ -169,7 +171,6 @@ org.apache.hadoop.hbase.HBaseConfigurati
<%def userTables>
<%java>
HTableDescriptor[] tables = admin.listTables();
- HConnectionManager.deleteConnection(admin.getConfiguration());
</%java>
<%if (tables != null && tables.length > 0)%>
<table>
@@ -197,6 +198,32 @@ org.apache.hadoop.hbase.HBaseConfigurati
</%if>
</%def>
+<%def userSnapshots>
+<%java>
+ List<SnapshotDescription> snapshots = admin.listSnapshots();
+</%java>
+<%if (snapshots != null && snapshots.size() > 0)%>
+<table>
+<tr>
+ <th>Snapshot</th>
+ <th>Table</th>
+ <th>Creation Time</th>
+ <th>Type</th>
+</tr>
+<%for SnapshotDescription snapshotDesc : snapshots%>
+<tr>
+ <td><% snapshotDesc.getName() %></td>
+ <td><a href="table.jsp?name=<% snapshotDesc.getTable() %>"><% snapshotDesc.getTable() %></a></td>
+ <td><% new Date(snapshotDesc.getCreationTime()) %></td>
+ <td><% snapshotDesc.getType() %></td>
+</tr>
+</%for>
+
+<p> <% snapshots.size() %> snapshot(s) in set.</p>
+</table>
+</%if>
+</%def>
+
<%def regionServers>
<h2>Region Servers</h2>
<%if (servers != null && servers.size() > 0)%>
@@ -254,3 +281,7 @@ org.apache.hadoop.hbase.HBaseConfigurati
</table>
</%if>
</%def>
+
+<%java>
+ HConnectionManager.deleteConnection(admin.getConfiguration(), false);
+</%java>
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/Chore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/Chore.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/Chore.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/Chore.java Mon Mar 4 11:24:50 2013
@@ -91,6 +91,14 @@ public abstract class Chore extends HasT
this.sleeper.skipSleepCycle();
}
+ /*
+ * Exposed for TESTING!
+ * calls directly the chore method, from the current thread.
+ */
+ public void choreForTesting() {
+ chore();
+ }
+
/**
* Override to run a task before we start looping.
* @return true if initial chore was successful
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java Mon Mar 4 11:24:50 2013
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Thread factory that creates daemon threads
+ */
+public class DaemonThreadFactory implements ThreadFactory {
+ static final AtomicInteger poolNumber = new AtomicInteger(1);
+ final ThreadGroup group;
+ final AtomicInteger threadNumber = new AtomicInteger(1);
+ final String namePrefix;
+
+ public DaemonThreadFactory(String name) {
+ SecurityManager s = System.getSecurityManager();
+ group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
+ namePrefix = name + poolNumber.getAndIncrement() + "-thread-";
+ }
+
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement(), 0);
+ if (!t.isDaemon()) {
+ t.setDaemon(true);
+ }
+ if (t.getPriority() != Thread.NORM_PRIORITY) {
+ t.setPriority(Thread.NORM_PRIORITY);
+ }
+ return t;
+ }
+}
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Mon Mar 4 11:24:50 2013
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.io.hfile.
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
@@ -403,7 +404,7 @@ public class HColumnDescriptor implement
* @throws IllegalArgumentException If not null and not a legitimate family
* name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
* <code>b</code> can be null when deserializing). Cannot start with a '.'
- * either.
+ * either. Also Family can not be an empty value or equal "recovered.edits".
*/
public static byte [] isLegalFamilyName(final byte [] b) {
if (b == null) {
@@ -420,6 +421,11 @@ public class HColumnDescriptor implement
Bytes.toString(b));
}
}
+ byte[] recoveredEdit = Bytes.toBytes(HLog.RECOVERED_EDITS_DIR);
+ if (Bytes.equals(recoveredEdit, b)) {
+ throw new IllegalArgumentException("Family name cannot be: " +
+ HLog.RECOVERED_EDITS_DIR);
+ }
return b;
}
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java Mon Mar 4 11:24:50 2013
@@ -671,11 +671,21 @@ public final class HConstants {
/** Directory under /hbase where archived hfiles are stored */
public static final String HFILE_ARCHIVE_DIRECTORY = ".archive";
+ /**
+ * Name of the directory to store all snapshots. See SnapshotDescriptionUtils for
+ * remaining snapshot constants; this is here to keep HConstants dependencies at a minimum and
+ * uni-directional.
+ */
+ public static final String SNAPSHOT_DIR_NAME = ".snapshot";
+
+ /** Temporary directory used for table creation and deletion */
+ public static final String HBASE_TEMP_DIRECTORY = ".tmp";
+
/** Directories that are not HBase table directories */
public static final List<String> HBASE_NON_TABLE_DIRS =
Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME,
HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME, SPLIT_LOGDIR_NAME,
- HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY }));
+ HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME, HBASE_TEMP_DIRECTORY }));
/** Directories that are not HBase user table directories */
public static final List<String> HBASE_NON_USER_TABLE_DIRS =
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Mon Mar 4 11:24:50 2013
@@ -80,6 +80,9 @@ implements WritableComparable<HRegionInf
private static final int ENC_SEPARATOR = '.';
public static final int MD5_HEX_LENGTH = 32;
+ /** A non-capture group so that this can be embedded. */
+ public static final String ENCODED_REGION_NAME_REGEX = "(?:[a-f0-9]+)";
+
/**
* Does region name contain its encoded name?
* @param regionName region name
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java Mon Mar 4 11:24:50 2013
@@ -358,6 +358,9 @@ public class HTableDescriptor implements
Bytes.equals(tableName, HConstants.META_TABLE_NAME);
}
+ // A non-capture group so that this can be embedded.
+ public static final String VALID_USER_TABLE_REGEX = "(?:[a-zA-Z_0-9][a-zA-Z_0-9.-]*)";
+
/**
* Check passed byte buffer, "tableName", is legal user-space table name.
* @return Returns passed <code>tableName</code> param
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java Mon Mar 4 11:24:50 2013
@@ -56,7 +56,7 @@ public class HFileArchiver {
private static final String SEPARATOR = ".";
/** Number of retries in case of fs operation failure */
- private static final int DEFAULT_RETRIES_NUMBER = 3;
+ private static final int DEFAULT_RETRIES_NUMBER = 6;
private HFileArchiver() {
// hidden ctor since this is just a util
@@ -73,14 +73,12 @@ public class HFileArchiver {
public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
throws IOException {
Path rootDir = FSUtils.getRootDir(conf);
- archiveRegion(conf, fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
+ archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
HRegion.getRegionDir(rootDir, info));
}
-
/**
* Remove an entire region from the table directory via archiving the region's hfiles.
- * @param conf the configuration to use
* @param fs {@link FileSystem} from which to remove the region
* @param rootdir {@link Path} to the root directory where hbase files are stored (for building
* the archive path)
@@ -90,8 +88,7 @@ public class HFileArchiver {
* operations could not complete.
* @throws IOException if the request cannot be completed
*/
- public static boolean archiveRegion(Configuration conf, FileSystem fs, Path rootdir,
- Path tableDir, Path regionDir)
+ public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("ARCHIVING region " + regionDir.toString());
@@ -110,7 +107,7 @@ public class HFileArchiver {
// make sure the regiondir lives under the tabledir
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
- Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir);
+ Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, tableDir, regionDir);
LOG.debug("Have an archive directory, preparing to move files");
FileStatusConverter getAsFile = new FileStatusConverter(fs);
@@ -150,7 +147,7 @@ public class HFileArchiver {
}
throw new IOException("Received error when attempting to archive files (" + toArchive
- + "), cannot delete region directory. ");
+ + "), cannot delete region directory.");
}
/**
@@ -240,6 +237,35 @@ public class HFileArchiver {
}
/**
+ * Archive the store file
+ * @param fs the filesystem where the store files live
+ * @param regionInfo region hosting the store files
+ * @param conf {@link Configuration} to examine to determine the archive directory
+ * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
+ * @param family the family hosting the store files
+ * @param storeFile file to be archived
+ * @throws IOException if the files could not be correctly disposed.
+ */
+ public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
+ Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
+ Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
+ // make sure we don't archive if we can't and that the archive dir exists
+ if (!fs.mkdirs(storeArchiveDir)) {
+ throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
+ + Bytes.toString(family) + ", deleting compacted files instead.");
+ }
+
+ // do the actual archive
+ long start = EnvironmentEdgeManager.currentTimeMillis();
+ File file = new FileablePath(fs, storeFile);
+ if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
+ throw new IOException("Failed to archive/delete the file for region:"
+ + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
+ + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
+ }
+ }
+
+ /**
* Archive the given files and resolve any conflicts with existing files via appending the time
* archiving started (so all conflicts in the same group have the same timestamp appended).
* <p>
@@ -414,6 +440,34 @@ public class HFileArchiver {
}
/**
+ * Simple delete of regular files from the {@link FileSystem}.
+ * <p>
+ * This method is a more generic implementation than the other deleteXXX
+ * methods in this class, allowing more code reuse at the cost of a couple
+ * more, short-lived objects (which should have minimum impact on the jvm).
+ * (Each {@link File} encapsulates the {@link FileSystem} it lives on.)
+ * @param files {@link Collection} of files to be deleted
+ * @throws IOException if a file cannot be deleted. All files will be
+ * attempted to be deleted before throwing the exception, rather than
+ * failing at the first file.
+ */
+ private static void deleteFilesWithoutArchiving(Collection<File> files) throws IOException {
+ List<IOException> errors = new ArrayList<IOException>(0);
+ for (File file : files) {
+ try {
+ LOG.debug("Deleting region file:" + file);
+ file.delete();
+ } catch (IOException e) {
+ LOG.error("Failed to delete file:" + file);
+ errors.add(e);
+ }
+ }
+ if (errors.size() > 0) {
+ throw MultipleIOException.createIOException(errors);
+ }
+ }
+
+ /**
* Without regard for backup, delete a region. Should be used with caution.
* @param regionDir {@link Path} to the region to be deleted.
* @param fs FileSystem from which to delete the region
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java Mon Mar 4 11:24:50 2013
@@ -18,8 +18,10 @@
package org.apache.hadoop.hbase.catalog;
import java.io.IOException;
+import java.io.InterruptedIOException;
import java.net.ConnectException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
@@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.NotAllMet
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
@@ -124,11 +127,44 @@ public class MetaEditor {
* @param d Delete to add to .META.
* @throws IOException
*/
- static void deleteMetaTable(final CatalogTracker ct, final Delete d)
- throws IOException {
+ static void deleteFromMetaTable(final CatalogTracker ct, final Delete d)
+ throws IOException {
+ List<Delete> dels = new ArrayList<Delete>(1);
+ dels.add(d);
+ deleteFromMetaTable(ct, dels);
+ }
+
+ /**
+ * Delete the passed <code>deletes</code> from the <code>.META.</code> table.
+ * @param ct CatalogTracker on whose back we will ride the edit.
+ * @param deletes Deletes to add to .META. This list should support #remove.
+ * @throws IOException
+ */
+ public static void deleteFromMetaTable(final CatalogTracker ct, final List<Delete> deletes)
+ throws IOException {
+ HTable t = MetaReader.getMetaHTable(ct);
+ try {
+ t.delete(deletes);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Execute the passed <code>mutations</code> against <code>.META.</code> table.
+ * @param ct CatalogTracker on whose back we will ride the edit.
+ * @param mutations Puts and Deletes to execute on .META.
+ * @throws IOException
+ */
+ static void mutateMetaTable(final CatalogTracker ct, final List<Mutation> mutations)
+ throws IOException {
HTable t = MetaReader.getMetaHTable(ct);
try {
- t.delete(d);
+ t.batch(mutations);
+ } catch (InterruptedException e) {
+ InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+ ie.initCause(e);
+ throw ie;
} finally {
t.close();
}
@@ -272,11 +308,57 @@ public class MetaEditor {
HRegionInfo regionInfo)
throws IOException {
Delete delete = new Delete(regionInfo.getRegionName());
- deleteMetaTable(catalogTracker, delete);
+ deleteFromMetaTable(catalogTracker, delete);
LOG.info("Deleted region " + regionInfo.getRegionNameAsString() + " from META");
}
/**
+ * Deletes the specified regions from META.
+ * @param catalogTracker
+ * @param regionsInfo list of regions to be deleted from META
+ * @throws IOException
+ */
+ public static void deleteRegions(CatalogTracker catalogTracker,
+ List<HRegionInfo> regionsInfo) throws IOException {
+ List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
+ for (HRegionInfo hri: regionsInfo) {
+ deletes.add(new Delete(hri.getRegionName()));
+ }
+ deleteFromMetaTable(catalogTracker, deletes);
+ LOG.info("Deleted from META, regions: " + regionsInfo);
+ }
+
+ /**
+ * Adds and Removes the specified regions from .META.
+ * @param catalogTracker
+ * @param regionsToRemove list of regions to be deleted from META
+ * @param regionsToAdd list of regions to be added to META
+ * @throws IOException
+ */
+ public static void mutateRegions(CatalogTracker catalogTracker,
+ final List<HRegionInfo> regionsToRemove, final List<HRegionInfo> regionsToAdd)
+ throws IOException {
+ List<Mutation> mutation = new ArrayList<Mutation>();
+ if (regionsToRemove != null) {
+ for (HRegionInfo hri: regionsToRemove) {
+ mutation.add(new Delete(hri.getRegionName()));
+ }
+ }
+ if (regionsToAdd != null) {
+ for (HRegionInfo hri: regionsToAdd) {
+ mutation.add(makePutFromRegionInfo(hri));
+ }
+ }
+ mutateMetaTable(catalogTracker, mutation);
+ if (regionsToRemove != null && regionsToRemove.size() > 0) {
+ LOG.debug("Deleted from META, regions: " + regionsToRemove);
+ }
+ if (regionsToAdd != null && regionsToAdd.size() > 0) {
+ LOG.debug("Add to META, regions: " + regionsToAdd);
+ }
+ }
+
+ /**
* Deletes daughters references in offlined split parent.
* @param catalogTracker
* @param parent Parent row we're to remove daughter reference from
@@ -289,7 +371,7 @@ public class MetaEditor {
Delete delete = new Delete(parent.getRegionName());
delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
- deleteMetaTable(catalogTracker, delete);
+ deleteFromMetaTable(catalogTracker, delete);
LOG.info("Deleted daughters references, qualifier=" + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) +
" and qualifier=" + Bytes.toStringBinary(HConstants.SPLITB_QUALIFIER) +
", from parent " + parent.getRegionNameAsString());
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Mon Mar 4 11:24:50 2013
@@ -61,14 +61,24 @@ import org.apache.hadoop.hbase.ipc.Copro
import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.ipc.MasterExecRPCInvoker;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest.CompactionState;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
+import com.google.protobuf.ServiceException;
+
/**
* Provides an interface to manage HBase database table metadata + general
* administrative functions. Use HBaseAdmin to create, drop, list, enable and
@@ -194,7 +204,7 @@ public class HBaseAdmin implements Abort
this.aborted = true;
throw new RuntimeException(why, e);
}
-
+
@Override
public boolean isAborted(){
return this.aborted;
@@ -601,7 +611,7 @@ public class HBaseAdmin implements Abort
// continue
}
}
-
+
if (tableExists) {
throw new IOException("Retries exhausted, it took too long to wait"+
" for the table " + Bytes.toString(tableName) + " to be deleted.");
@@ -676,9 +686,23 @@ public class HBaseAdmin implements Abort
enableTableAsync(tableName);
// Wait until all regions are enabled
+ waitUntilTableIsEnabled(tableName);
+
+ LOG.info("Enabled table " + Bytes.toString(tableName));
+ }
+
+ /**
+ * Wait for the table to be enabled and available
+ * If enabling the table exceeds the retry period, an exception is thrown.
+ * @param tableName name of the table
+ * @throws IOException if a remote or network exception occurs or
+ * table is not enabled after the retries period.
+ */
+ private void waitUntilTableIsEnabled(final byte[] tableName) throws IOException {
boolean enabled = false;
+ long start = EnvironmentEdgeManager.currentTimeMillis();
for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
- enabled = isTableEnabled(tableName);
+ enabled = isTableEnabled(tableName) && isTableAvailable(tableName);
if (enabled) {
break;
}
@@ -697,10 +721,10 @@ public class HBaseAdmin implements Abort
}
}
if (!enabled) {
- throw new IOException("Unable to enable table " +
- Bytes.toString(tableName));
+ long msec = EnvironmentEdgeManager.currentTimeMillis() - start;
+ throw new IOException("Table '" + Bytes.toString(tableName) +
+ "' not yet enabled, after " + msec + "ms.");
}
- LOG.info("Enabled table " + Bytes.toString(tableName));
}
public void enableTableAsync(final String tableName)
@@ -1119,7 +1143,7 @@ public class HBaseAdmin implements Abort
* servername is provided then based on the online regions in the specified
* regionserver the specified region will be closed. The master will not be
* informed of the close. Note that the regionname is the encoded regionname.
- *
+ *
* @param encodedRegionName
* The encoded region name; i.e. the hash that makes up the region
* name suffix: e.g. if regionname is
@@ -1255,7 +1279,7 @@ public class HBaseAdmin implements Abort
throws IOException, InterruptedException {
compact(tableNameOrRegionName, null, false);
}
-
+
/**
* Compact a column family within a table or region.
* Asynchronous operation.
@@ -1309,7 +1333,7 @@ public class HBaseAdmin implements Abort
throws IOException, InterruptedException {
compact(tableNameOrRegionName, null, true);
}
-
+
/**
* Major compact a column family within a table or region.
* Asynchronous operation.
@@ -1761,7 +1785,7 @@ public class HBaseAdmin implements Abort
* @param tableName the name of the table
* @return Ordered list of {@link HRegionInfo}.
* @throws IOException
- */
+ */
public List<HRegionInfo> getTableRegions(final byte[] tableName)
throws IOException {
CatalogTracker ct = getCatalogTracker();
@@ -1773,7 +1797,7 @@ public class HBaseAdmin implements Abort
}
return Regions;
}
-
+
public void close() throws IOException {
if (this.connection != null) {
this.connection.close();
@@ -1793,14 +1817,14 @@ public class HBaseAdmin implements Abort
/**
* Roll the log writer. That is, start writing log messages to a new file.
- *
+ *
* @param serverName
* The servername of the regionserver. A server name is made of host,
* port and startcode. This is mandatory. Here is an example:
* <code> host187.example.com,60020,1289493121758</code>
* @return If lots of logs, flush the returned regions so next time through
* we can clean logs. Returns null if nothing to flush. Names are actual
- * region names as returned by {@link HRegionInfo#getEncodedName()}
+ * region names as returned by {@link HRegionInfo#getEncodedName()}
* @throws IOException if a remote or network exception occurs
* @throws FailedLogCloseException
*/
@@ -1921,4 +1945,367 @@ public class HBaseAdmin implements Abort
connection,
protocol));
}
+
+
+ /**
+ * Create a timestamp consistent snapshot for the given table.
+ * <p>
+ * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
+ * snapshot with the same name (even a different type or with different parameters) will fail with
+ * a {@link SnapshotCreationException} indicating the duplicate naming.
+ * <p>
+ * Snapshot names follow the same naming constraints as tables in HBase. See
+ * {@link HTableDescriptor#isLegalTableName(byte[])}.
+ * @param snapshotName name of the snapshot to be created
+ * @param tableName name of the table for which snapshot is created
+ * @throws IOException if a remote or network exception occurs
+ * @throws SnapshotCreationException if snapshot creation failed
+ * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+ */
+ public void snapshot(final String snapshotName, final String tableName) throws IOException,
+ SnapshotCreationException, IllegalArgumentException {
+ snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
+ }
+
+ /**
+ * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
+ * taken. If the table is disabled, an offline snapshot is taken.
+ * <p>
+ * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
+ * snapshot with the same name (even a different type or with different parameters) will fail with
+ * a {@link SnapshotCreationException} indicating the duplicate naming.
+ * <p>
+ * Snapshot names follow the same naming constraints as tables in HBase. See
+ * {@link HTableDescriptor#isLegalTableName(byte[])}.
+ * @param snapshotName name of the snapshot to be created
+ * @param tableName name of the table for which snapshot is created
+ * @throws IOException if a remote or network exception occurs
+ * @throws SnapshotCreationException if snapshot creation failed
+ * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+ */
+ public void snapshot(final byte[] snapshotName, final byte[] tableName) throws IOException,
+ SnapshotCreationException, IllegalArgumentException {
+ snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName));
+ }
+
+ /**
+ * Create typed snapshot of the table.
+ * <p>
+ * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
+ * snapshot with the same name (even a different type or with different parameters) will fail with
+ * a {@link SnapshotCreationException} indicating the duplicate naming.
+ * <p>
+ * Snapshot names follow the same naming constraints as tables in HBase. See
+ * {@link HTableDescriptor#isLegalTableName(byte[])}.
+ * <p>
+ * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
+ * snapshots stored on the cluster
+ * @param tableName name of the table to snapshot
+ * @param type type of snapshot to take
+ * @throws IOException if we fail to reach the master
+ * @throws SnapshotCreationException if snapshot creation failed
+ * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+ */
+ public void snapshot(final String snapshotName, final String tableName,
+ SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
+ IllegalArgumentException {
+ SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+ builder.setTable(tableName);
+ builder.setName(snapshotName);
+ builder.setType(type);
+ snapshot(builder.build());
+ }
+
+ /**
+ * Take a snapshot and wait for the server to complete that snapshot (blocking).
+ * <p>
+ * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
+ * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
+ * time for a single cluster).
+ * <p>
+ * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
+ * snapshot with the same name (even a different type or with different parameters) will fail with
+ * a {@link SnapshotCreationException} indicating the duplicate naming.
+ * <p>
+ * Snapshot names follow the same naming constraints as tables in HBase. See
+ * {@link HTableDescriptor#isLegalTableName(byte[])}.
+ * <p>
+ * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
+ * unless you are sure about the type of snapshot that you want to take.
+ * @param snapshot snapshot to take
+ * @throws IOException if we lose contact with the master.
+ * @throws SnapshotCreationException if snapshot failed to be taken
+ * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+ */
+ public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
+ IllegalArgumentException {
+ HSnapshotDescription snapshotWritable = new HSnapshotDescription(snapshot);
+
+ try {
+ // actually take the snapshot
+ long max = takeSnapshotAsync(snapshot);
+ long start = EnvironmentEdgeManager.currentTimeMillis();
+ long maxPauseTime = max / this.numRetries;
+ boolean done = false;
+ int tries = 0;
+ LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
+ SnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
+ maxPauseTime + " ms per retry)");
+ while (tries == 0 || (EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done) {
+ try {
+ // sleep a backoff <= pauseTime amount
+ long sleep = getPauseTime(tries++);
+ sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
+ LOG.debug("(#" + tries + ") Sleeping: " + sleep +
+ "ms while waiting for snapshot completion.");
+ Thread.sleep(sleep);
+
+ } catch (InterruptedException e) {
+ LOG.debug("Interrupted while waiting for snapshot " + snapshot + " to complete");
+ Thread.currentThread().interrupt();
+ }
+ LOG.debug("Getting current status of snapshot from master...");
+ done = getMaster().isSnapshotDone(snapshotWritable);
+ }
+
+ if (!done) {
+ throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
+ + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
+ }
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
+ * <p>
+ * Only a single snapshot should be taken at a time, or results may be undefined.
+ * @param snapshot snapshot to take
+ * @return the max time in millis to wait for the snapshot
+ * @throws IOException if the snapshot did not succeed or we lose contact with the master.
+ * @throws SnapshotCreationException if snapshot creation failed
+ * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+ */
+ public long takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
+ SnapshotCreationException {
+ SnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
+ HSnapshotDescription snapshotWritable = new HSnapshotDescription(snapshot);
+ return getMaster().snapshot(snapshotWritable);
+ }
+
+ /**
+ * Check the current state of the passed snapshot.
+ * <p>
+ * There are three possible states:
+ * <ol>
+ * <li>running - returns <tt>false</tt></li>
+ * <li>finished - returns <tt>true</tt></li>
+ * <li>finished with error - throws the exception that caused the snapshot to fail</li>
+ * </ol>
+ * <p>
+ * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
+ * run/started since the snapshot you are checking, you will receive an
+ * {@link UnknownSnapshotException}.
+ * @param snapshot description of the snapshot to check
+ * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
+ * running
+ * @throws IOException if we have a network issue
+ * @throws HBaseSnapshotException if the snapshot failed
+ * @throws UnknownSnapshotException if the requested snapshot is unknown
+ */
+ public boolean isSnapshotFinished(final SnapshotDescription snapshot)
+ throws IOException, HBaseSnapshotException, UnknownSnapshotException {
+ try {
+ return getMaster().isSnapshotDone(new HSnapshotDescription(snapshot));
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Restore the specified snapshot on the original table. (The table must be disabled)
+ * Before restoring the table, a new snapshot with the current table state is created.
+ * In case of failure, the table will be rolled back to its original state.
+ *
+ * @param snapshotName name of the snapshot to restore
+ * @throws IOException if a remote or network exception occurs
+ * @throws RestoreSnapshotException if snapshot failed to be restored
+ * @throws IllegalArgumentException if the restore request is formatted incorrectly
+ */
+ public void restoreSnapshot(final byte[] snapshotName)
+ throws IOException, RestoreSnapshotException {
+ restoreSnapshot(Bytes.toString(snapshotName));
+ }
+
+ /**
+ * Restore the specified snapshot on the original table. (The table must be disabled)
+ * Before restoring the table, a new snapshot with the current table state is created.
+ * In case of failure, the table will be rolled back to its original state.
+ *
+ * @param snapshotName name of the snapshot to restore
+ * @throws IOException if a remote or network exception occurs
+ * @throws RestoreSnapshotException if snapshot failed to be restored
+ * @throws IllegalArgumentException if the restore request is formatted incorrectly
+ */
+ public void restoreSnapshot(final String snapshotName)
+ throws IOException, RestoreSnapshotException {
+ String rollbackSnapshot = snapshotName + "-" + EnvironmentEdgeManager.currentTimeMillis();
+
+ String tableName = null;
+ for (SnapshotDescription snapshotInfo: listSnapshots()) {
+ if (snapshotInfo.getName().equals(snapshotName)) {
+ tableName = snapshotInfo.getTable();
+ break;
+ }
+ }
+
+ if (tableName == null) {
+ throw new RestoreSnapshotException(
+ "Unable to find the table name for snapshot=" + snapshotName);
+ }
+
+ // Take a snapshot of the current state
+ snapshot(rollbackSnapshot, tableName);
+
+ // Restore snapshot
+ try {
+ internalRestoreSnapshot(snapshotName, tableName);
+ } catch (IOException e) {
+ // Try to rollback
+ try {
+ String msg = "Restore snapshot=" + snapshotName +
+ " failed. Rollback to snapshot=" + rollbackSnapshot + " succeeded.";
+ LOG.error(msg, e);
+ internalRestoreSnapshot(rollbackSnapshot, tableName);
+ throw new RestoreSnapshotException(msg, e);
+ } catch (IOException ex) {
+ String msg = "Failed to restore and rollback to snapshot=" + rollbackSnapshot;
+ LOG.error(msg, ex);
+ throw new RestoreSnapshotException(msg, ex);
+ }
+ }
+ }
+
+ /**
+ * Create a new table by cloning the snapshot content.
+ *
+ * @param snapshotName name of the snapshot to be cloned
+ * @param tableName name of the table where the snapshot will be restored
+ * @throws IOException if a remote or network exception occurs
+ * @throws TableExistsException if table to be created already exists
+ * @throws RestoreSnapshotException if snapshot failed to be cloned
+ * @throws IllegalArgumentException if the specified table does not have a valid name
+ */
+ public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
+ throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
+ cloneSnapshot(Bytes.toString(snapshotName), Bytes.toString(tableName));
+ }
+
+ /**
+ * Create a new table by cloning the snapshot content.
+ *
+ * @param snapshotName name of the snapshot to be cloned
+ * @param tableName name of the table where the snapshot will be restored
+ * @throws IOException if a remote or network exception occurs
+ * @throws TableExistsException if table to be created already exists
+ * @throws RestoreSnapshotException if snapshot failed to be cloned
+ * @throws IllegalArgumentException if the specified table does not have a valid name
+ */
+ public void cloneSnapshot(final String snapshotName, final String tableName)
+ throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
+ if (tableExists(tableName)) {
+ throw new TableExistsException("Table '" + tableName + " already exists");
+ }
+ internalRestoreSnapshot(snapshotName, tableName);
+ waitUntilTableIsEnabled(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
+ * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
+ * create an HTable instance to this table before it is available.
+ * @param snapshotName name of the snapshot to restore
+ * @param tableName table name to restore the snapshot on
+ * @throws IOException if a remote or network exception occurs
+ * @throws RestoreSnapshotException if snapshot failed to be restored
+ * @throws IllegalArgumentException if the restore request is formatted incorrectly
+ */
+ private void internalRestoreSnapshot(final String snapshotName, final String tableName)
+ throws IOException, RestoreSnapshotException {
+ HSnapshotDescription snapshot = new HSnapshotDescription(
+ SnapshotDescription.newBuilder().setName(snapshotName).setTable(tableName).build());
+
+ try {
+ // actually restore the snapshot
+ getMaster().restoreSnapshot(snapshot);
+
+ final long maxPauseTime = 5000;
+ boolean done = false;
+ int tries = 0;
+ while (!done) {
+ try {
+ // sleep a backoff <= pauseTime amount
+ long sleep = getPauseTime(tries++);
+ sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
+ LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
+ Thread.sleep(sleep);
+ } catch (InterruptedException e) {
+ LOG.debug("Interrupted while waiting for snapshot " + snapshot + " restore to complete");
+ Thread.currentThread().interrupt();
+ }
+ LOG.debug("Getting current status of snapshot restore from master...");
+ done = getMaster().isRestoreSnapshotDone(snapshot);
+ }
+ if (!done) {
+ throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
+ }
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * List completed snapshots.
+ * @return a list of snapshot descriptors for completed snapshots
+ * @throws IOException if a network error occurs
+ */
+ public List<SnapshotDescription> listSnapshots() throws IOException {
+ List<SnapshotDescription> snapshots = new LinkedList<SnapshotDescription>();
+ try {
+ for (HSnapshotDescription snapshot: getMaster().getCompletedSnapshots()) {
+ snapshots.add(snapshot.getProto());
+ }
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ return snapshots;
+ }
+
+ /**
+ * Delete an existing snapshot.
+ * @param snapshotName name of the snapshot
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void deleteSnapshot(final byte[] snapshotName) throws IOException {
+ // make sure the snapshot is possibly valid
+ HTableDescriptor.isLegalTableName(snapshotName);
+ // do the delete
+ SnapshotDescription snapshot = SnapshotDescription.newBuilder()
+ .setName(Bytes.toString(snapshotName)).build();
+ try {
+ getMaster().deleteSnapshot(new HSnapshotDescription(snapshot));
+ } catch (RemoteException e) {
+ throw RemoteExceptionHandler.decodeRemoteException(e);
+ }
+ }
+
+ /**
+ * Delete an existing snapshot.
+ * @param snapshotName name of the snapshot
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void deleteSnapshot(final String snapshotName) throws IOException {
+ deleteSnapshot(Bytes.toBytes(snapshotName));
+ }
}
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java Mon Mar 4 11:24:50 2013
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.Coprocess
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UnknownRegionException;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import java.io.IOException;
@@ -185,4 +186,50 @@ public class BaseMasterObserver implemen
HRegionInfo region, ServerName srcServer, ServerName destServer)
throws IOException {
}
+
+ @Override
+ public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ // Default no-op; subclasses override to act before a snapshot is taken.
+ }
+
+ @Override
+ public void postSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ // Default no-op; subclasses override to act after a snapshot is requested.
+ }
+
+ @Override
+ public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ // Default no-op; subclasses override to act before a snapshot is cloned.
+ }
+
+ @Override
+ public void postCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ // Default no-op; subclasses override to act after a clone is requested.
+ }
+
+ @Override
+ public void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ // Default no-op; subclasses override to act before a snapshot is restored.
+ }
+
+ @Override
+ public void postRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ // Default no-op; subclasses override to act after a restore is requested.
+ }
+
+ @Override
+ public void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException {
+ // Default no-op; subclasses override to act before a snapshot is deleted.
+ }
+
+ @Override
+ public void postDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException {
+ // Default no-op; subclasses override to act after a delete is requested.
+ }
}
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java Mon Mar 4 11:24:50 2013
@@ -21,6 +21,7 @@
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import java.io.IOException;
@@ -289,4 +290,100 @@ public interface MasterObserver extends
*/
void postStartMaster(final ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException;
+
+ /**
+ * Called before a new snapshot is taken.
+ * Called as part of snapshot RPC call.
+ * It can't bypass the default action, e.g., ctx.bypass() won't have any effect.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor for the snapshot
+ * @param hTableDescriptor the hTableDescriptor of the table to snapshot
+ * @throws IOException if an error occurred while processing the request
+ */
+ void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException;
+
+ /**
+ * Called after the snapshot operation has been requested.
+ * Called as part of snapshot RPC call.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor for the snapshot
+ * @param hTableDescriptor the hTableDescriptor of the table to snapshot
+ * @throws IOException if an error occurred while processing the request
+ */
+ void postSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException;
+
+ /**
+ * Called before a snapshot is cloned.
+ * Called as part of restoreSnapshot RPC call.
+ * It can't bypass the default action, e.g., ctx.bypass() won't have any effect.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor for the snapshot
+ * @param hTableDescriptor the hTableDescriptor of the table to create
+ * @throws IOException if an error occurred while processing the request
+ */
+ void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException;
+
+ /**
+ * Called after a snapshot clone operation has been requested.
+ * Called as part of restoreSnapshot RPC call.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor for the snapshot
+ * @param hTableDescriptor the hTableDescriptor of the table to create
+ * @throws IOException if an error occurred while processing the request
+ */
+ void postCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException;
+
+ /**
+ * Called before a snapshot is restored.
+ * Called as part of restoreSnapshot RPC call.
+ * It can't bypass the default action, e.g., ctx.bypass() won't have any effect.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor for the snapshot
+ * @param hTableDescriptor the hTableDescriptor of the table to restore
+ * @throws IOException if an error occurred while processing the request
+ */
+ void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException;
+
+ /**
+ * Called after a snapshot restore operation has been requested.
+ * Called as part of restoreSnapshot RPC call.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor for the snapshot
+ * @param hTableDescriptor the hTableDescriptor of the table to restore
+ * @throws IOException if an error occurred while processing the request
+ */
+ void postRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException;
+
+ /**
+ * Called before a snapshot is deleted.
+ * Called as part of deleteSnapshot RPC call.
+ * It can't bypass the default action, e.g., ctx.bypass() won't have any effect.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor of the snapshot to delete
+ * @throws IOException if an error occurred while processing the request
+ */
+ void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException;
+
+ /**
+ * Called after the delete snapshot operation has been requested.
+ * Called as part of deleteSnapshot RPC call.
+ * @param ctx the environment to interact with the framework and master
+ * @param snapshot the SnapshotDescriptor of the snapshot to delete
+ * @throws IOException if an error occurred while processing the request
+ */
+ void postDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot) throws IOException;
}
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java Mon Mar 4 11:24:50 2013
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage;
+import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage;
+import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * A ForeignException is an exception from another thread or process.
+ * <p>
+ * ForeignExceptions are sent to 'remote' peers to signal an abort in the face of failures.
+ * When serialized for transmission we encode using Protobufs to ensure version compatibility.
+ * <p>
+ * Foreign exceptions contain a Throwable as its cause. This can be a "regular" exception
+ * generated locally or a ProxyThrowable that is a representation of the original exception
+ * created on the original 'remote' source. These ProxyThrowables have their stack traces and
+ * messages overridden to reflect the original 'remote' exception. The only way these
+ * ProxyThrowables are generated is by this class's {@link #deserialize(byte[])} method.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@SuppressWarnings("serial")
+public class ForeignException extends IOException {
+
+ /**
+ * Name of the throwable's source such as a host or thread name. Must be non-null.
+ */
+ private final String source;
+
+ /**
+ * Create a new ForeignException that can be serialized. It is assumed that this came from a
+ * local source.
+ * @param source name of the host/thread on which the cause originated; must be non-null
+ * @param cause the local exception to wrap; must be non-null
+ */
+ public ForeignException(String source, Throwable cause) {
+ super(cause);
+ assert source != null;
+ assert cause != null;
+ this.source = source;
+ }
+
+ /**
+ * Create a new ForeignException that can be serialized. It is assumed that this is locally
+ * generated.
+ * @param source name of the host/thread on which the message originated; must be non-null
+ * @param msg human-readable description of the failure
+ */
+ public ForeignException(String source, String msg) {
+ super(new IllegalArgumentException(msg));
+ // keep the non-null contract of the 'source' field consistent with the other constructor
+ assert source != null;
+ this.source = source;
+ }
+
+ /** @return the name of the host/thread this exception came from */
+ public String getSource() {
+ return source;
+ }
+
+ /**
+ * The cause of a ForeignException can be an exception that was generated on a local in process
+ * thread, or a thread from a 'remote' separate process.
+ *
+ * If the cause is a ProxyThrowable, we know it came from deserialization which usually means
+ * it came from not only another thread, but also from a remote thread.
+ *
+ * @return true if went through deserialization, false if locally generated
+ */
+ public boolean isRemote() {
+ return getCause() instanceof ProxyThrowable;
+ }
+
+ @Override
+ public String toString() {
+ String className = getCause().getClass().getName();
+ return className + " via " + getSource() + ":" + getLocalizedMessage();
+ }
+
+ /**
+ * Convert a stack trace to list of {@link StackTraceElement}.
+ * @param trace the stack trace to convert to protobuf message
+ * @return <tt>null</tt> if the passed stack is <tt>null</tt>.
+ */
+ private static List<StackTraceElementMessage> toStackTraceElementMessages(
+ StackTraceElement[] trace) {
+ // if there is no stack trace, ignore it and just return the message
+ if (trace == null) return null;
+ // build the stack trace for the message
+ List<StackTraceElementMessage> pbTrace =
+ new ArrayList<StackTraceElementMessage>(trace.length);
+ for (StackTraceElement elem : trace) {
+ StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
+ stackBuilder.setDeclaringClass(elem.getClassName());
+ // StackTraceElement.getFileName() may return null (e.g. native or synthetic
+ // frames); protobuf builder setters throw NullPointerException on null, so
+ // only set the field when a file name is actually available.
+ if (elem.getFileName() != null) {
+ stackBuilder.setFileName(elem.getFileName());
+ }
+ stackBuilder.setLineNumber(elem.getLineNumber());
+ stackBuilder.setMethodName(elem.getMethodName());
+ pbTrace.add(stackBuilder.build());
+ }
+ return pbTrace;
+ }
+
+ /**
+ * This is a Proxy Throwable that contains the information of the original remote exception
+ */
+ private static class ProxyThrowable extends Throwable {
+ ProxyThrowable(String msg, StackTraceElement[] trace) {
+ super(msg);
+ this.setStackTrace(trace);
+ }
+ }
+
+ /**
+ * Converts a ForeignException to an array of bytes.
+ * @param source the name of the external exception source
+ * @param t the "local" external exception (local)
+ * @return protobuf serialized version of ForeignException
+ */
+ public static byte[] serialize(String source, Throwable t) {
+ GenericExceptionMessage.Builder gemBuilder = GenericExceptionMessage.newBuilder();
+ gemBuilder.setClassName(t.getClass().getName());
+ if (t.getMessage() != null) {
+ gemBuilder.setMessage(t.getMessage());
+ }
+ // set the stack trace, if there is one
+ List<StackTraceElementMessage> stack =
+ ForeignException.toStackTraceElementMessages(t.getStackTrace());
+ if (stack != null) {
+ gemBuilder.addAllTrace(stack);
+ }
+ GenericExceptionMessage payload = gemBuilder.build();
+ ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder();
+ exception.setGenericException(payload).setSource(source);
+ ForeignExceptionMessage eem = exception.build();
+ return eem.toByteArray();
+ }
+
+ /**
+ * Takes a series of bytes and tries to generate a ForeignException instance for it.
+ * @param bytes the protobuf-serialized exception
+ * @return the ForeignException instance
+ * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown.
+ */
+ public static ForeignException deserialize(byte[] bytes) throws InvalidProtocolBufferException {
+ // figure out the data we need to pass
+ ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes);
+ GenericExceptionMessage gem = eem.getGenericException();
+ StackTraceElement [] trace = ForeignException.toStackTrace(gem.getTraceList());
+ ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace);
+ ForeignException e = new ForeignException(eem.getSource(), dfe);
+ return e;
+ }
+
+ /**
+ * Unwind a serialized array of {@link StackTraceElementMessage}s to a
+ * {@link StackTraceElement}s.
+ * @param traceList list that was serialized
+ * @return the deserialized list or <tt>null</tt> if it couldn't be unwound (e.g. wasn't set on
+ * the sender).
+ */
+ private static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
+ if (traceList == null || traceList.size() == 0) {
+ return new StackTraceElement[0]; // empty array
+ }
+ StackTraceElement[] trace = new StackTraceElement[traceList.size()];
+ for (int i = 0; i < traceList.size(); i++) {
+ StackTraceElementMessage elem = traceList.get(i);
+ trace[i] = new StackTraceElement(
+ elem.getDeclaringClass(), elem.getMethodName(), elem.getFileName(), elem.getLineNumber());
+ }
+ return trace;
+ }
+}
\ No newline at end of file
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java Mon Mar 4 11:24:50 2013
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The dispatcher acts as the state holding entity for foreign error handling. The first
+ * exception received by the dispatcher get passed directly to the listeners. Subsequent
+ * exceptions are dropped.
+ * <p>
+ * If there are multiple dispatchers that are all in the same foreign exception monitoring group,
+ * ideally all these monitors are "peers" -- any error on one dispatcher should get propagated to
+ * all others (via rpc, or some other mechanism). Due to racing error conditions the exact reason
+ * for failure may be different on different peers, but the fact that they are in error state
+ * should eventually hold on all.
+ * <p>
+ * This is thread-safe and must be because this is expected to be used to propagate exceptions
+ * from foreign threads.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ForeignExceptionDispatcher implements ForeignExceptionListener, ForeignExceptionSnare {
+ public static final Log LOG = LogFactory.getLog(ForeignExceptionDispatcher.class);
+ protected final String name;
+ protected final List<ForeignExceptionListener> listeners =
+ new ArrayList<ForeignExceptionListener>();
+ private ForeignException exception;
+
+ public ForeignExceptionDispatcher(String name) {
+ this.name = name;
+ }
+
+ public ForeignExceptionDispatcher() {
+ this("");
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public synchronized void receive(ForeignException e) {
+ // if we already have an exception, then ignore it
+ if (exception != null) return;
+
+ LOG.debug(name + " accepting received exception" , e);
+ // mark that we got the error
+ if (e != null) {
+ exception = e;
+ } else {
+ exception = new ForeignException(name, "");
+ }
+
+ // notify all the listeners
+ dispatch(e);
+ }
+
+ @Override
+ public synchronized void rethrowException() throws ForeignException {
+ if (exception != null) {
+ // This gets the stack where this is caused, (instead of where it was deserialized).
+ // This is much more useful for debugging
+ throw new ForeignException(exception.getSource(), exception.getCause());
+ }
+ }
+
+ @Override
+ public synchronized boolean hasException() {
+ return exception != null;
+ }
+
+ @Override
+ synchronized public ForeignException getException() {
+ return exception;
+ }
+
+ /**
+ * Sends an exception to all listeners.
+ * @param message human readable message passed to the listener
+ * @param e {@link ForeignException} containing the cause. Can be null.
+ */
+ private void dispatch(ForeignException e) {
+ // update all the listeners with the passed error
+ for (ForeignExceptionListener l: listeners) {
+ l.receive(e);
+ }
+ }
+
+ /**
+ * Listen for failures to a given process. This method should only be used during
+ * initialization and not added to after exceptions are accepted.
+ * @param errorable listener for the errors. may be null.
+ */
+ public synchronized void addListener(ForeignExceptionListener errorable) {
+ this.listeners.add(errorable);
+ }
+}
\ No newline at end of file