Posted to commits@hbase.apache.org by te...@apache.org on 2016/09/09 20:57:09 UTC
hbase git commit: HBASE-16595 Remove reference to Admin from backup / restore server code
Repository: hbase
Updated Branches:
refs/heads/HBASE-7912 b65a2f679 -> 260fafe53
HBASE-16595 Remove reference to Admin from backup / restore server code
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/260fafe5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/260fafe5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/260fafe5
Branch: refs/heads/HBASE-7912
Commit: 260fafe53a9de589a52d2c812518a8101cebc78b
Parents: b65a2f6
Author: tedyu <yu...@gmail.com>
Authored: Fri Sep 9 13:56:59 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Fri Sep 9 13:56:59 2016 -0700
----------------------------------------------------------------------
.../hadoop/hbase/backup/impl/BackupManager.java | 7 +-
.../backup/impl/IncrementalBackupManager.java | 27 +-
.../backup/impl/RestoreTablesProcedure.java | 20 +-
.../backup/master/FullTableBackupProcedure.java | 4 +-
.../master/IncrementalTableBackupProcedure.java | 6 +-
.../hbase/backup/util/BackupServerUtil.java | 65 +++--
.../hbase/backup/util/RestoreServerUtil.java | 268 +++++++++----------
7 files changed, 204 insertions(+), 193 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index a8d21bb..b0d329b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -50,7 +50,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.master.MasterServices;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -221,8 +222,8 @@ public class BackupManager implements Closeable {
// list with all user tables from meta. If no table is available, throw the request exception.
HTableDescriptor[] htds = null;
- try (Admin hbadmin = conn.getAdmin()) {
- htds = hbadmin.listTables();
+ try {
+ htds = ((ClusterConnection)conn).listTables();
} catch (Exception e) {
throw new BackupException(e);
}
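
For readers tracking the API shift: the server-side code now avoids materializing an Admin and instead downcasts its Connection. A minimal sketch of the resulting pattern, assuming ClusterConnection still exposes the deprecated listTables() inherited from HConnection (which this hunk relies on); BackupException is swapped for IOException to keep the sketch self-contained.

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;

final class ListTablesSketch {
  // List user tables without creating an Admin, mirroring the hunk above.
  static HTableDescriptor[] listUserTables(Connection conn) throws IOException {
    try {
      return ((ClusterConnection) conn).listTables();
    } catch (Exception e) {
      // the real code wraps this in BackupException
      throw new IOException("listTables via ClusterConnection failed", e);
    }
  }
}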
http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index bd496ce..cde6c04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -41,6 +42,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
+import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
@@ -70,11 +74,12 @@ public class IncrementalBackupManager {
/**
* Obtain the list of logs that need to be copied out for this incremental backup. The list is set
* in BackupContext.
+ * @param svc MasterServices
* @param backupContext backup context
* @return The new HashMap of RS log timestamps after the log roll for this incremental backup.
* @throws IOException exception
*/
- public HashMap<String, Long> getIncrBackupLogFileList(BackupInfo backupContext)
+ public HashMap<String, Long> getIncrBackupLogFileList(MasterServices svc, BackupInfo backupContext)
throws IOException {
List<String> logList;
HashMap<String, Long> newTimestamps;
@@ -100,13 +105,21 @@ public class IncrementalBackupManager {
+ "In order to create an incremental backup, at least one full backup is needed.");
}
- try (Admin admin = conn.getAdmin()) {
- LOG.info("Execute roll log procedure for incremental backup ...");
- HashMap<String, String> props = new HashMap<String, String>();
- props.put("backupRoot", backupContext.getTargetRootDir());
- admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+ LOG.info("Execute roll log procedure for incremental backup ...");
+ HashMap<String, String> props = new HashMap<String, String>();
+ props.put("backupRoot", backupContext.getTargetRootDir());
+ MasterProcedureManager mpm = svc.getMasterProcedureManagerHost()
+ .getProcedureManager(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE);
+ long waitTime = MasterProcedureUtil.execProcedure(mpm,
+ LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
- }
+ MasterProcedureUtil.waitForProcedure(mpm,
+ LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+ LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props, waitTime,
+ conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+ HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER),
+ conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
+ HConstants.DEFAULT_HBASE_CLIENT_PAUSE));
newTimestamps = backupManager.readRegionServerLastLogRollResult();
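
The net effect of this hunk: instead of round-tripping through Admin.execProcedure, the backup manager drives the registered log-roll MasterProcedureManager directly. A condensed sketch, assuming MasterServices exposes getMasterProcedureManagerHost() and that the MasterProcedureUtil.execProcedure/waitForProcedure helpers are branch-local additions whose signatures are inferred from the hunk above:

import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;

final class RollLogSketch {
  static void rollLogs(MasterServices svc, Configuration conf, String backupRoot)
      throws Exception {
    HashMap<String, String> props = new HashMap<String, String>();
    props.put("backupRoot", backupRoot);
    // Look up the procedure manager registered under the log-roll signature.
    MasterProcedureManager mpm = svc.getMasterProcedureManagerHost()
        .getProcedureManager(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE);
    // Kick off the distributed log roll ...
    long waitTime = MasterProcedureUtil.execProcedure(mpm,
        LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
        LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
    // ... and block until it completes, bounded by the client retry settings.
    MasterProcedureUtil.waitForProcedure(mpm,
        LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
        LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props, waitTime,
        conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER),
        conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
            HConstants.DEFAULT_HBASE_CLIENT_PAUSE));
  }
}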
http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java
index 214ad80..7ac11de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
@@ -146,7 +147,7 @@ public class RestoreTablesProcedure
* @param truncateIfExists truncate table if it exists
* @throws IOException exception
*/
- private void restoreImages(Connection conn, Iterator<BackupImage> it, TableName sTable,
+ private void restoreImages(MasterServices svc, Iterator<BackupImage> it, TableName sTable,
TableName tTable, boolean truncateIfExists) throws IOException {
// First image MUST be image of a FULL backup
@@ -181,7 +182,7 @@ public class RestoreTablesProcedure
if (manifest.getType() == BackupType.FULL || converted) {
LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from "
+ (converted ? "converted" : "full") + " backup image " + tableBackupPath.toString());
- restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable,
+ restoreTool.fullRestoreTable(svc, tableBackupPath, sTable, tTable,
converted, truncateIfExists, lastIncrBackupId);
} else { // incremental Backup
throw new IOException("Unexpected backup type " + image.getType());
@@ -195,15 +196,15 @@ public class RestoreTablesProcedure
String[] sarr = new String[logDirList.size()];
logDirList.toArray(sarr);
Path[] paths = org.apache.hadoop.util.StringUtils.stringToPath(sarr);
- restoreTool.incrementalRestoreTable(conn, tableBackupPath, paths, new TableName[] { sTable },
- new TableName[] { tTable }, lastIncrBackupId);
+ restoreTool.incrementalRestoreTable(svc, tableBackupPath, paths,
+ new TableName[] { sTable }, new TableName[] { tTable }, lastIncrBackupId);
}
LOG.info(sTable + " has been successfully restored to " + tTable);
}
/**
* Restore operation. Stage 2: resolve Backup Image dependency
- * @param conn the Connection
+ * @param svc MasterServices
* @param backupManifestMap : tableName, Manifest
* @param sTableArray The array of tables to be restored
* @param tTableArray The array of mapping tables to restore to
@@ -211,7 +212,7 @@ public class RestoreTablesProcedure
* @return set of BackupImages restored
* @throws IOException exception
*/
- private void restoreStage(Connection conn, HashMap<TableName, BackupManifest> backupManifestMap,
+ private void restoreStage(MasterServices svc, HashMap<TableName, BackupManifest> backupManifestMap,
TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
boolean truncateIfExists = isOverwrite;
@@ -227,7 +228,7 @@ public class RestoreTablesProcedure
list.addAll(depList);
TreeSet<BackupImage> restoreList = new TreeSet<BackupImage>(list);
LOG.debug("need to clear merged Image. to be implemented in future jira");
- restoreImages(conn, restoreList.iterator(), table, tTableArray[i], truncateIfExists);
+ restoreImages(svc, restoreList.iterator(), table, tTableArray[i], truncateIfExists);
restoreImageSet.addAll(restoreList);
if (restoreImageSet != null && !restoreImageSet.isEmpty()) {
@@ -257,9 +258,8 @@ public class RestoreTablesProcedure
if (LOG.isTraceEnabled()) {
LOG.trace(this + " execute state=" + state);
}
- Connection conn = env.getMasterServices().getClusterConnection();
TableName[] tTableArray = tTableList.toArray(new TableName[tTableList.size()]);
- try (Admin admin = conn.getAdmin()) {
+ try {
switch (state) {
case VALIDATION:
@@ -276,7 +276,7 @@ public class RestoreTablesProcedure
Path rootPath = new Path(targetRootDir);
HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath,
backupId);
- restoreStage(env.getMasterServices().getConnection(), backupManifestMap, sTableArray,
+ restoreStage(env.getMasterServices(), backupManifestMap, sTableArray,
tTableArray, isOverwrite);
return Flow.NO_MORE_STATE;
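
The structural change in this file: one MasterServices handle, obtained once from the procedure environment, is threaded through every restore stage instead of a Connection/Admin pair. Boiled down to a sketch (RestoreServerUtil is reduced to a stand-in interface here; the real methods carry more arguments, as the hunks show):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;

final class RestoreDispatchSketch {
  // Stand-in for RestoreServerUtil; the real signatures also take backup
  // paths, conversion flags and backup ids.
  interface RestoreTool {
    void fullRestoreTable(MasterServices svc, TableName from, TableName to)
        throws IOException;
  }

  static void restore(MasterProcedureEnv env, RestoreTool tool,
      TableName sTable, TableName tTable) throws IOException {
    MasterServices svc = env.getMasterServices(); // obtained once, passed down
    tool.fullRestoreTable(svc, sTable, tTable);   // no Admin created anywhere
  }
}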
http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java
index f2d2615..c56aaf3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java
@@ -102,8 +102,8 @@ public class FullTableBackupProcedure
this.tableList = tableList;
this.targetRootDir = targetRootDir;
backupContext =
- backupManager.createBackupContext(backupId, BackupType.FULL, tableList, targetRootDir,
- workers, bandwidth);
+ backupManager.createBackupContext(backupId, BackupType.FULL,
+ tableList, targetRootDir, workers, bandwidth);
if (tableList == null || tableList.isEmpty()) {
this.tableList = new ArrayList<>(backupContext.getTables());
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java
index 0a54e8b..64c1fb4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java
@@ -222,7 +222,8 @@ public class IncrementalTableBackupProcedure
try {
IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager);
- newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext);
+ newTimestamps = incrBackupManager.getIncrBackupLogFileList(env.getMasterServices(),
+ backupContext);
} catch (Exception e) {
setFailure("Failure in incremental-backup: preparation phase " + backupId, e);
// fail the overall backup and return
@@ -235,8 +236,7 @@ public class IncrementalTableBackupProcedure
case INCREMENTAL_COPY:
try {
// copy out the table and region info files for each table
- BackupServerUtil.copyTableRegionInfo(env.getMasterServices().getConnection(),
- backupContext, conf);
+ BackupServerUtil.copyTableRegionInfo(env.getMasterServices(), backupContext, conf);
incrementalCopy(backupContext);
// Save list of WAL files copied
backupManager.recordWALFiles(backupContext.getIncrBackupFileList());
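
Taken together with the IncrementalBackupManager change above, the two affected phases of this procedure now look roughly like the following sketch (error handling and the surrounding state machine are elided; package locations of the branch-local backup classes are assumed from the diff paths):

import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager;
import org.apache.hadoop.hbase.backup.util.BackupServerUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;

final class IncrBackupPhasesSketch {
  static HashMap<String, Long> prepareAndCopy(MasterProcedureEnv env,
      IncrementalBackupManager mgr, BackupInfo backupContext, Configuration conf)
      throws Exception {
    // PREPARE_INCREMENTAL: roll WALs and compute the log file list.
    HashMap<String, Long> newTimestamps =
        mgr.getIncrBackupLogFileList(env.getMasterServices(), backupContext);
    // INCREMENTAL_COPY: table/region metadata is copied via MasterServices too.
    BackupServerUtil.copyTableRegionInfo(env.getMasterServices(), backupContext, conf);
    return newTimestamps;
  }
}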
http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
index 5e0f3c4..486fd2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
@@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -150,53 +152,50 @@ public final class BackupServerUtil {
/**
* Copy out Table RegionInfo into the incremental backup image. Need to consider moving this
* logic into HBackupFileSystem
- * @conn the Connection
+ * @param svc MasterServices
* @param backupContext backup context
* @param conf configuration
* @throws IOException exception
* @throws InterruptedException exception
*/
- public static void copyTableRegionInfo(Connection conn, BackupInfo backupContext,
+ public static void copyTableRegionInfo(MasterServices svc, BackupInfo backupContext,
Configuration conf) throws IOException, InterruptedException {
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
// for each table in the table set, copy out the table info and region
// info files in the correct directory structure
- try (Admin admin = conn.getAdmin()) {
- for (TableName table : backupContext.getTables()) {
+ Connection conn = svc.getConnection();
+ for (TableName table : backupContext.getTables()) {
- if(!admin.tableExists(table)) {
- LOG.warn("Table "+ table+" does not exists, skipping it.");
- continue;
- }
- TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
-
- // write a copy of descriptor to the target directory
- Path target = new Path(backupContext.getBackupStatus(table).getTargetDir());
- FileSystem targetFs = target.getFileSystem(conf);
- FSTableDescriptors descriptors =
- new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf));
- descriptors.createTableDescriptorForTableDirectory(target, orig, false);
- LOG.debug("Attempting to copy table info for:" + table + " target: " + target +
- " descriptor: " + orig);
- LOG.debug("Finished copying tableinfo.");
- List<HRegionInfo> regions = null;
- regions = admin.getTableRegions(table);
- // For each region, write the region info to disk
- LOG.debug("Starting to write region info for table " + table);
- for (HRegionInfo regionInfo : regions) {
- Path regionDir =
- HRegion.getRegionDir(new Path(backupContext.getBackupStatus(table).getTargetDir()),
+ if(!MetaTableAccessor.tableExists(conn, table)) {
+ LOG.warn("Table "+ table+" does not exists, skipping it.");
+ continue;
+ }
+ TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
+
+ // write a copy of descriptor to the target directory
+ Path target = new Path(backupContext.getBackupStatus(table).getTargetDir());
+ FileSystem targetFs = target.getFileSystem(conf);
+ FSTableDescriptors descriptors =
+ new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf));
+ descriptors.createTableDescriptorForTableDirectory(target, orig, false);
+ LOG.debug("Attempting to copy table info for:" + table + " target: " + target +
+ " descriptor: " + orig);
+ LOG.debug("Finished copying tableinfo.");
+ List<HRegionInfo> regions = null;
+ regions = MetaTableAccessor.getTableRegions(conn, table);
+ // For each region, write the region info to disk
+ LOG.debug("Starting to write region info for table " + table);
+ for (HRegionInfo regionInfo : regions) {
+ Path regionDir =
+ HRegion.getRegionDir(new Path(backupContext.getBackupStatus(table).getTargetDir()),
regionInfo);
- regionDir =
- new Path(backupContext.getBackupStatus(table).getTargetDir(), regionDir.getName());
- writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
- }
- LOG.debug("Finished writing region info for table " + table);
+ regionDir =
+ new Path(backupContext.getBackupStatus(table).getTargetDir(), regionDir.getName());
+ writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
}
- } catch (IOException e) {
- throw new BackupException(e);
+ LOG.debug("Finished writing region info for table " + table);
}
}
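
The Admin-backed existence and region lookups are replaced by MetaTableAccessor, which reads hbase:meta through the plain Connection; both static methods are long-standing HBase server-side APIs. A minimal sketch of the lookup pair:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;

final class MetaLookupSketch {
  // Returns the table's regions, or null if the table is absent from meta
  // (the loop above logs a warning and skips such tables).
  static List<HRegionInfo> regionsIfPresent(Connection conn, TableName table)
      throws IOException {
    if (!MetaTableAccessor.tableExists(conn, table)) {
      return null;
    }
    return MetaTableAccessor.getTableRegions(conn, table);
  }
}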
http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
index 007ca9e..37bfcc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HStore;
@@ -146,16 +148,15 @@ public class RestoreServerUtil {
return regionDirList;
}
- static void modifyTableSync(Admin admin, HTableDescriptor desc)
- throws IOException {
- admin.modifyTable(desc.getTableName(), desc);
+ static void modifyTableSync(MasterServices svc, HTableDescriptor desc) throws IOException {
+ svc.modifyTable(desc.getTableName(), desc, HConstants.NO_NONCE, HConstants.NO_NONCE);
Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
setFirst(0);
setSecond(0);
}};
int i = 0;
do {
- status = admin.getAlterStatus(desc.getTableName());
+ status = svc.getAssignmentManager().getReopenStatus(desc.getTableName());
if (status.getSecond() != 0) {
LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
+ " regions updated.");
@@ -180,6 +181,7 @@ public class RestoreServerUtil {
* During incremental backup operation. Call WalPlayer to replay WAL in backup image Currently
* tableNames and newTablesNames only contain single table, will be expanded to multiple tables in
* the future
+ * @param svc MasterServices
* @param tableBackupPath backup path
* @param logDirs : incremental backup folders, which contains WAL
* @param tableNames : source tableNames(table names were backuped)
@@ -187,7 +189,7 @@ public class RestoreServerUtil {
* @param incrBackupId incremental backup Id
* @throws IOException exception
*/
- public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
+ public void incrementalRestoreTable(MasterServices svc, Path tableBackupPath, Path[] logDirs,
TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
if (tableNames.length != newTableNames.length) {
@@ -197,54 +199,51 @@ public class RestoreServerUtil {
// for incremental backup image, expect the table already created either by user or previous
// full backup. Here, check that all new tables exists
- try (Admin admin = conn.getAdmin()) {
- for (TableName tableName : newTableNames) {
- if (!MetaTableAccessor.tableExists(conn, tableName)) {
- admin.close();
- throw new IOException("HBase table " + tableName
+ for (TableName tableName : newTableNames) {
+ if (!MetaTableAccessor.tableExists(svc.getConnection(), tableName)) {
+ throw new IOException("HBase table " + tableName
+ " does not exist. Create the table first, e.g. by restoring a full backup.");
- }
}
- // adjust table schema
- for (int i = 0; i < tableNames.length; i++) {
- TableName tableName = tableNames[i];
- HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
- LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
-
- TableName newTableName = newTableNames[i];
- HTableDescriptor newTableDescriptor = admin.getTableDescriptor(newTableName);
- List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
- List<HColumnDescriptor> existingFamilies =
- Arrays.asList(newTableDescriptor.getColumnFamilies());
- boolean schemaChangeNeeded = false;
- for (HColumnDescriptor family : families) {
- if (!existingFamilies.contains(family)) {
- newTableDescriptor.addFamily(family);
- schemaChangeNeeded = true;
- }
- }
- for (HColumnDescriptor family : existingFamilies) {
- if (!families.contains(family)) {
- newTableDescriptor.removeFamily(family.getName());
- schemaChangeNeeded = true;
- }
+ }
+ // adjust table schema
+ for (int i = 0; i < tableNames.length; i++) {
+ TableName tableName = tableNames[i];
+ HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
+ LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
+
+ TableName newTableName = newTableNames[i];
+ HTableDescriptor newTableDescriptor = svc.getTableDescriptors().get(newTableName);
+ List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
+ List<HColumnDescriptor> existingFamilies =
+ Arrays.asList(newTableDescriptor.getColumnFamilies());
+ boolean schemaChangeNeeded = false;
+ for (HColumnDescriptor family : families) {
+ if (!existingFamilies.contains(family)) {
+ newTableDescriptor.addFamily(family);
+ schemaChangeNeeded = true;
}
- if (schemaChangeNeeded) {
- RestoreServerUtil.modifyTableSync(admin, newTableDescriptor);
- LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
+ }
+ for (HColumnDescriptor family : existingFamilies) {
+ if (!families.contains(family)) {
+ newTableDescriptor.removeFamily(family.getName());
+ schemaChangeNeeded = true;
}
}
- IncrementalRestoreService restoreService =
- BackupRestoreServerFactory.getIncrementalRestoreService(conf);
-
- restoreService.run(logDirs, tableNames, newTableNames);
+ if (schemaChangeNeeded) {
+ RestoreServerUtil.modifyTableSync(svc, newTableDescriptor);
+ LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
+ }
}
+ IncrementalRestoreService restoreService =
+ BackupRestoreServerFactory.getIncrementalRestoreService(conf);
+
+ restoreService.run(logDirs, tableNames, newTableNames);
}
- public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName,
+ public void fullRestoreTable(MasterServices svc, Path tableBackupPath, TableName tableName,
TableName newTableName, boolean converted, boolean truncateIfExists, String lastIncrBackupId)
throws IOException {
- restoreTableAndCreate(conn, tableName, newTableName, tableBackupPath, converted, truncateIfExists,
+ restoreTableAndCreate(svc, tableName, newTableName, tableBackupPath, converted, truncateIfExists,
lastIncrBackupId);
}
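
Within incrementalRestoreTable the target table's descriptor now comes from MasterServices.getTableDescriptors() rather than Admin.getTableDescriptor, and column-family drift between the backup image and the live table is reconciled before the WALs are replayed. The reconciliation step, condensed (names are local to this sketch):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterServices;

final class SchemaReconcileSketch {
  // backupDesc is the descriptor recovered from the backup image.
  static boolean reconcile(MasterServices svc, TableName target,
      HTableDescriptor backupDesc) throws IOException {
    HTableDescriptor live = svc.getTableDescriptors().get(target);
    List<HColumnDescriptor> wanted = Arrays.asList(backupDesc.getColumnFamilies());
    List<HColumnDescriptor> existing = Arrays.asList(live.getColumnFamilies());
    boolean changed = false;
    for (HColumnDescriptor f : wanted) {      // add families the image has
      if (!existing.contains(f)) { live.addFamily(f); changed = true; }
    }
    for (HColumnDescriptor f : existing) {    // drop families the image lacks
      if (!wanted.contains(f)) { live.removeFamily(f.getName()); changed = true; }
    }
    return changed; // caller then invokes modifyTableSync(svc, live)
  }
}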
@@ -363,7 +362,7 @@ public class RestoreServerUtil {
return null;
}
- private void restoreTableAndCreate(Connection conn, TableName tableName, TableName newTableName,
+ private void restoreTableAndCreate(MasterServices svc, TableName tableName, TableName newTableName,
Path tableBackupPath, boolean converted, boolean truncateIfExists, String lastIncrBackupId)
throws IOException {
if (newTableName == null || newTableName.equals("")) {
@@ -377,99 +376,97 @@ public class RestoreServerUtil {
LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
}
- try (HBaseAdmin hbadmin = (HBaseAdmin) conn.getAdmin();) {
- if (tableDescriptor == null) {
- Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
- if (fileSys.exists(tableSnapshotPath)) {
- // snapshot path exist means the backup path is in HDFS
- // check whether snapshot dir already recorded for target table
- if (snapshotMap.get(tableName) != null) {
- SnapshotDescription desc =
- SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
- SnapshotManifest manifest = SnapshotManifest.open(conf,fileSys,tableSnapshotPath,desc);
- tableDescriptor = manifest.getTableDescriptor();
- LOG.debug("obtained descriptor from " + manifest);
- } else {
- tableDescriptor = getTableDesc(tableName);
- snapshotMap.put(tableName, getTableInfoPath(tableName));
- LOG.debug("obtained descriptor from snapshot for " + tableName);
- }
- if (tableDescriptor == null) {
- LOG.debug("Found no table descriptor in the snapshot dir, previous schema was lost");
- }
- } else if (converted) {
- // first check if this is a converted backup image
- LOG.error("convert will be supported in a future jira");
- }
- }
-
- Path tableArchivePath = getTableArchivePath(tableName);
- if (tableArchivePath == null) {
- if (tableDescriptor != null) {
- // find table descriptor but no archive dir => the table is empty, create table and exit
- if(LOG.isDebugEnabled()) {
- LOG.debug("find table descriptor but no archive dir for table " + tableName
- + ", will only create table");
- }
- tableDescriptor.setName(newTableName);
- checkAndCreateTable(hbadmin, tableBackupPath, tableName, newTableName, null,
- tableDescriptor, truncateIfExists);
- return;
+ if (tableDescriptor == null) {
+ Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
+ if (fileSys.exists(tableSnapshotPath)) {
+ // snapshot path exists, meaning the backup path is in HDFS
+ // check whether snapshot dir already recorded for target table
+ if (snapshotMap.get(tableName) != null) {
+ SnapshotDescription desc =
+ SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
+ SnapshotManifest manifest = SnapshotManifest.open(conf,fileSys,tableSnapshotPath,desc);
+ tableDescriptor = manifest.getTableDescriptor();
+ LOG.debug("obtained descriptor from " + manifest);
} else {
- throw new IllegalStateException("Cannot restore hbase table because directory '"
- + " tableArchivePath is null.");
+ tableDescriptor = getTableDesc(tableName);
+ snapshotMap.put(tableName, getTableInfoPath(tableName));
+ LOG.debug("obtained descriptor from snapshot for " + tableName);
}
+ if (tableDescriptor == null) {
+ LOG.debug("Found no table descriptor in the snapshot dir, previous schema was lost");
+ }
+ } else if (converted) {
+ // first check if this is a converted backup image
+ LOG.error("convert will be supported in a future jira");
}
+ }
- if (tableDescriptor == null) {
- LOG.debug("New descriptor for " + newTableName);
- tableDescriptor = new HTableDescriptor(newTableName);
- } else {
+ Path tableArchivePath = getTableArchivePath(tableName);
+ if (tableArchivePath == null) {
+ if (tableDescriptor != null) {
+ // find table descriptor but no archive dir => the table is empty, create table and exit
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("find table descriptor but no archive dir for table " + tableName
+ + ", will only create table");
+ }
tableDescriptor.setName(newTableName);
+ checkAndCreateTable(svc, tableBackupPath, tableName, newTableName, null,
+ tableDescriptor, truncateIfExists);
+ return;
+ } else {
+ throw new IllegalStateException("Cannot restore hbase table because directory '"
+ + " tableArchivePath is null.");
}
+ }
- if (!converted) {
- // record all region dirs:
- // load all files in dir
- try {
- ArrayList<Path> regionPathList = getRegionList(tableName);
-
- // should only try to create the table with all region informations, so we could pre-split
- // the regions in fine grain
- checkAndCreateTable(hbadmin, tableBackupPath, tableName, newTableName, regionPathList,
- tableDescriptor, truncateIfExists);
- if (tableArchivePath != null) {
- // start real restore through bulkload
- // if the backup target is on local cluster, special action needed
- Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath);
- if (tempTableArchivePath.equals(tableArchivePath)) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath);
- }
- } else {
- regionPathList = getRegionList(tempTableArchivePath); // point to the tempDir
- if(LOG.isDebugEnabled()) {
- LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath);
- }
+ if (tableDescriptor == null) {
+ LOG.debug("New descriptor for " + newTableName);
+ tableDescriptor = new HTableDescriptor(newTableName);
+ } else {
+ tableDescriptor.setName(newTableName);
+ }
+
+ if (!converted) {
+ // record all region dirs:
+ // load all files in dir
+ try {
+ ArrayList<Path> regionPathList = getRegionList(tableName);
+
+ // should only try to create the table with all region information, so we can pre-split
+ // the regions at a fine grain
+ checkAndCreateTable(svc, tableBackupPath, tableName, newTableName, regionPathList,
+ tableDescriptor, truncateIfExists);
+ if (tableArchivePath != null) {
+ // start real restore through bulkload
+ // if the backup target is on local cluster, special action needed
+ Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath);
+ if (tempTableArchivePath.equals(tableArchivePath)) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath);
+ }
+ } else {
+ regionPathList = getRegionList(tempTableArchivePath); // point to the tempDir
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath);
}
+ }
- LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false);
- for (Path regionPath : regionPathList) {
- String regionName = regionPath.toString();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Restoring HFiles from directory " + regionName);
- }
- String[] args = { regionName, newTableName.getNameAsString() };
- loader.run(args);
+ LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false);
+ for (Path regionPath : regionPathList) {
+ String regionName = regionPath.toString();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Restoring HFiles from directory " + regionName);
}
+ String[] args = { regionName, newTableName.getNameAsString() };
+ loader.run(args);
}
- // we do not recovered edits
- } catch (Exception e) {
- throw new IllegalStateException("Cannot restore hbase table", e);
}
- } else {
- LOG.debug("convert will be supported in a future jira");
+ // we do not restore recovered edits
+ } catch (Exception e) {
+ throw new IllegalStateException("Cannot restore hbase table", e);
}
+ } else {
+ LOG.debug("convert will be supported in a future jira");
}
}
@@ -662,6 +659,7 @@ public class RestoreServerUtil {
/**
* Prepare the table for bulkload; most code copied from
* {@link LoadIncrementalHFiles#createTable(String, String)}
+ * @param svc MasterServices
* @param tableBackupPath path
* @param tableName table name
* @param targetTableName target table name
@@ -669,18 +667,18 @@ public class RestoreServerUtil {
* @param htd table descriptor
* @throws IOException exception
*/
- private void checkAndCreateTable(HBaseAdmin hbadmin, Path tableBackupPath, TableName tableName,
+ private void checkAndCreateTable(MasterServices svc, Path tableBackupPath, TableName tableName,
TableName targetTableName, ArrayList<Path> regionDirList,
HTableDescriptor htd, boolean truncateIfExists)
throws IOException {
try {
boolean createNew = false;
- if (hbadmin.tableExists(targetTableName)) {
+ if (MetaTableAccessor.tableExists(svc.getConnection(), targetTableName)) {
if(truncateIfExists) {
LOG.info("Truncating exising target table '" + targetTableName +
"', preserving region splits");
- hbadmin.disableTable(targetTableName);
- hbadmin.truncateTable(targetTableName, true);
+ svc.disableTable(targetTableName, HConstants.NO_NONCE, HConstants.NO_NONCE);
+ svc.truncateTable(targetTableName, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
} else{
LOG.info("Using exising target table '" + targetTableName + "'");
}
@@ -689,16 +687,16 @@ public class RestoreServerUtil {
}
if (createNew){
LOG.info("Creating target table '" + targetTableName + "'");
- // if no region directory given, create the table and return
+ byte[][] keys = null;
if (regionDirList == null || regionDirList.size() == 0) {
- hbadmin.createTable(htd);
- return;
+ svc.createTable(htd, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
+ } else {
+ keys = generateBoundaryKeys(regionDirList);
+ // create table using table descriptor and region boundaries
+ svc.createTable(htd, keys, HConstants.NO_NONCE, HConstants.NO_NONCE);
}
- byte[][] keys = generateBoundaryKeys(regionDirList);
- // create table using table descriptor and region boundaries
- hbadmin.createTable(htd, keys);
long startTime = EnvironmentEdgeManager.currentTime();
- while (!hbadmin.isTableAvailable(targetTableName, keys)) {
+ while (!((ClusterConnection)svc.getConnection()).isTableAvailable(targetTableName, keys)) {
Thread.sleep(100);
if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) {
throw new IOException("Time out "+TABLE_AVAILABILITY_WAIT_TIME+