Posted to commits@hbase.apache.org by st...@apache.org on 2017/12/02 01:22:47 UTC
[04/10] hbase git commit: HBASE-19407 [branch-2] Remove backup/restore
http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
deleted file mode 100644
index 9d87612..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ /dev/null
@@ -1,747 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.util;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * A collection for methods used by multiple classes to backup HBase tables.
- */
-@InterfaceAudience.Private
-public final class BackupUtils {
- protected static final Log LOG = LogFactory.getLog(BackupUtils.class);
- public static final String LOGNAME_SEPARATOR = ".";
- public static final int MILLISEC_IN_HOUR = 3600000;
-
- private BackupUtils() {
- throw new AssertionError("Instantiating utility class...");
- }
-
- /**
- * Loop through the RS log timestamp map for the tables: for each RS, find the minimum timestamp
- * value for that RS across all the tables.
- * @param rsLogTimestampMap timestamp map
- * @return the min timestamp of each RS
- */
- public static HashMap<String, Long> getRSLogTimestampMins(
- HashMap<TableName, HashMap<String, Long>> rsLogTimestampMap) {
-
- if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
- return null;
- }
-
- HashMap<String, Long> rsLogTimestampMins = new HashMap<String, Long>();
- HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS =
- new HashMap<String, HashMap<TableName, Long>>();
-
- for (Entry<TableName, HashMap<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
- TableName table = tableEntry.getKey();
- HashMap<String, Long> rsLogTimestamp = tableEntry.getValue();
- for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
- String rs = rsEntry.getKey();
- Long ts = rsEntry.getValue();
- if (!rsLogTimestampMapByRS.containsKey(rs)) {
- rsLogTimestampMapByRS.put(rs, new HashMap<TableName, Long>());
- }
- rsLogTimestampMapByRS.get(rs).put(table, ts);
- }
- }
-
- for (Entry<String, HashMap<TableName, Long>> entry : rsLogTimestampMapByRS.entrySet()) {
- String rs = entry.getKey();
- rsLogTimestampMins.put(rs, BackupUtils.getMinValue(entry.getValue()));
- }
-
- return rsLogTimestampMins;
- }
-
- /**
- * Copy out the table RegionInfo into the incremental backup image. Consider moving this logic
- * into HBackupFileSystem.
- * @param conn connection
- * @param backupInfo backup info
- * @param conf configuration
- * @throws IOException exception
- * @throws InterruptedException exception
- */
- public static void
- copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
- throws IOException, InterruptedException {
- Path rootDir = FSUtils.getRootDir(conf);
- FileSystem fs = rootDir.getFileSystem(conf);
-
- // for each table in the table set, copy out the table info and region
- // info files in the correct directory structure
- for (TableName table : backupInfo.getTables()) {
-
- if (!MetaTableAccessor.tableExists(conn, table)) {
- LOG.warn("Table " + table + " does not exists, skipping it.");
- continue;
- }
- TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
-
- // write a copy of descriptor to the target directory
- Path target = new Path(backupInfo.getTableBackupDir(table));
- FileSystem targetFs = target.getFileSystem(conf);
- FSTableDescriptors descriptors =
- new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf));
- descriptors.createTableDescriptorForTableDirectory(target, orig, false);
- LOG.debug("Attempting to copy table info for:" + table + " target: " + target
- + " descriptor: " + orig);
- LOG.debug("Finished copying tableinfo.");
- List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
- // For each region, write the region info to disk
- LOG.debug("Starting to write region info for table " + table);
- for (RegionInfo regionInfo : regions) {
- Path regionDir =
- HRegion.getRegionDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
- regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
- writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
- }
- LOG.debug("Finished writing region info for table " + table);
- }
- }
-
- /**
- * Write the .regioninfo file on-disk.
- */
- public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
- final Path regionInfoDir, RegionInfo regionInfo) throws IOException {
- final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo);
- Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR);
- // First check to get the permissions
- FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
- // Write the RegionInfo file content
- try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null)) {
- out.write(content);
- }
- }
-
- /**
- * Parses hostname:port from WAL file path
- * @param p path to WAL file
- * @return hostname:port
- */
- public static String parseHostNameFromLogFile(Path p) {
- try {
- if (AbstractFSWALProvider.isArchivedLogFile(p)) {
- return BackupUtils.parseHostFromOldLog(p);
- } else {
- ServerName sname = AbstractFSWALProvider.getServerNameFromWALDirectoryName(p);
- if (sname != null) {
- return sname.getAddress().toString();
- } else {
- LOG.error("Skip log file (can't parse): " + p);
- return null;
- }
- }
- } catch (Exception e) {
- LOG.error("Skip log file (can't parse): " + p, e);
- return null;
- }
- }
-
- /**
- * Returns WAL file name
- * @param walFileName WAL file name
- * @return WAL file name
- * @throws IOException exception
- * @throws IllegalArgumentException exception
- */
- public static String getUniqueWALFileNamePart(String walFileName) throws IOException {
- return getUniqueWALFileNamePart(new Path(walFileName));
- }
-
- /**
- * Returns WAL file name
- * @param p WAL file path
- * @return WAL file name
- * @throws IOException exception
- */
- public static String getUniqueWALFileNamePart(Path p) throws IOException {
- return p.getName();
- }
-
- /**
- * Get the total length of files under the given directory recursively.
- * @param fs The hadoop file system
- * @param dir The target directory
- * @return the total length of files
- * @throws IOException exception
- */
- public static long getFilesLength(FileSystem fs, Path dir) throws IOException {
- long totalLength = 0;
- FileStatus[] files = FSUtils.listStatus(fs, dir);
- if (files != null) {
- for (FileStatus fileStatus : files) {
- if (fileStatus.isDirectory()) {
- totalLength += getFilesLength(fs, fileStatus.getPath());
- } else {
- totalLength += fileStatus.getLen();
- }
- }
- }
- return totalLength;
- }
-
- /**
- * Get list of all old WAL files (WALs and archive)
- * @param c configuration
- * @param hostTimestampMap {host,timestamp} map
- * @return list of WAL files
- * @throws IOException exception
- */
- public static List<String> getWALFilesOlderThan(final Configuration c,
- final HashMap<String, Long> hostTimestampMap) throws IOException {
- Path rootDir = FSUtils.getRootDir(c);
- Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
- Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- List<String> logFiles = new ArrayList<String>();
-
- PathFilter filter = new PathFilter() {
-
- @Override
- public boolean accept(Path p) {
- try {
- if (AbstractFSWALProvider.isMetaFile(p)) {
- return false;
- }
- String host = parseHostNameFromLogFile(p);
- if (host == null) {
- return false;
- }
- Long oldTimestamp = hostTimestampMap.get(host);
- Long currentLogTS = BackupUtils.getCreationTime(p);
- return currentLogTS <= oldTimestamp;
- } catch (Exception e) {
- LOG.warn("Can not parse" + p, e);
- return false;
- }
- }
- };
- FileSystem fs = FileSystem.get(c);
- logFiles = BackupUtils.getFiles(fs, logDir, logFiles, filter);
- logFiles = BackupUtils.getFiles(fs, oldLogDir, logFiles, filter);
- return logFiles;
- }
-
- public static TableName[] parseTableNames(String tables) {
- if (tables == null) {
- return null;
- }
- String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
-
- TableName[] ret = new TableName[tableArray.length];
- for (int i = 0; i < tableArray.length; i++) {
- ret[i] = TableName.valueOf(tableArray[i]);
- }
- return ret;
- }
-
- /**
- * Check whether the backup path exists
- * @param backupStr backup
- * @param conf configuration
- * @return true if the path exists
- * @throws IOException exception
- */
- public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
- Path backupPath = new Path(backupStr);
- FileSystem fileSys = backupPath.getFileSystem(conf);
- String targetFsScheme = fileSys.getUri().getScheme();
- if (LOG.isTraceEnabled()) {
- LOG.trace("Scheme of given url: " + backupStr + " is: " + targetFsScheme);
- }
- return fileSys.exists(backupPath);
- }
-
- /**
- * Check target path first, confirm it doesn't exist before backup
- * @param backupRootPath backup destination path
- * @param conf configuration
- * @throws IOException exception
- */
- public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
- boolean targetExists = false;
- try {
- targetExists = checkPathExist(backupRootPath, conf);
- } catch (IOException e) {
- String expMsg = e.getMessage();
- if (expMsg.contains("No FileSystem for scheme")) {
- String newMsg =
- "Unsupported filesystem scheme found in the backup target url. Error Message: "
- + expMsg;
- LOG.error(newMsg);
- throw new IOException(newMsg);
- } else {
- throw e;
- }
- }
-
- if (targetExists) {
- LOG.info("Using existing backup root dir: " + backupRootPath);
- } else {
- LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
- }
- }
-
- /**
- * Get the min value of all the values in a map.
- * @param map map
- * @return the min value
- */
- public static <T> Long getMinValue(HashMap<T, Long> map) {
- Long minTimestamp = null;
- if (map != null) {
- ArrayList<Long> timestampList = new ArrayList<Long>(map.values());
- Collections.sort(timestampList);
- // The min among all the RS log timestamps will be kept in the backup system table.
- minTimestamp = timestampList.get(0);
- }
- return minTimestamp;
- }
-
- /**
- * Parses hostname:port from an archived WAL path
- * @param p path
- * @return hostname:port, or null if the path cannot be parsed
- */
- public static String parseHostFromOldLog(Path p) {
- try {
- String n = p.getName();
- int idx = n.lastIndexOf(LOGNAME_SEPARATOR);
- String s = URLDecoder.decode(n.substring(0, idx), "UTF8");
- return ServerName.parseHostname(s) + ":" + ServerName.parsePort(s);
- } catch (Exception e) {
- LOG.warn("Skip log file (can't parse): " + p);
- return null;
- }
- }
-
- /**
- * Given the log file, parse the timestamp from the file name. The timestamp is the last number.
- * @param p a path to the log file
- * @return the timestamp
- * @throws IOException exception
- */
- public static Long getCreationTime(Path p) throws IOException {
- int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR);
- if (idx < 0) {
- throw new IOException("Cannot parse timestamp from path " + p);
- }
- String ts = p.getName().substring(idx + 1);
- return Long.parseLong(ts);
- }
-
- public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
- PathFilter filter) throws FileNotFoundException, IOException {
- RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
-
- while (it.hasNext()) {
- LocatedFileStatus lfs = it.next();
- if (lfs.isDirectory()) {
- continue;
- }
- // apply filter
- if (filter.accept(lfs.getPath())) {
- files.add(lfs.getPath().toString());
- }
- }
- return files;
- }
-
- public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException {
- cleanupHLogDir(context, conf);
- cleanupTargetDir(context, conf);
- }
-
- /**
- * Clean up the directories generated when DistCp copies WALs
- * @param backupInfo backup info
- * @param conf configuration
- * @throws IOException exception
- */
- private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
-
- String logDir = backupInfo.getHLogTargetDir();
- if (logDir == null) {
- LOG.warn("No log directory specified for " + backupInfo.getBackupId());
- return;
- }
-
- Path rootPath = new Path(logDir).getParent();
- FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
- FileStatus[] files = listStatus(fs, rootPath, null);
- if (files == null) {
- return;
- }
- for (FileStatus file : files) {
- LOG.debug("Delete log files: " + file.getPath().getName());
- fs.delete(file.getPath(), true);
- }
- }
-
- private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
- try {
- // clean up the data at target directory
- LOG.debug("Trying to cleanup up target dir : " + backupInfo.getBackupId());
- String targetDir = backupInfo.getBackupRootDir();
- if (targetDir == null) {
- LOG.warn("No target directory specified for " + backupInfo.getBackupId());
- return;
- }
-
- FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
-
- for (TableName table : backupInfo.getTables()) {
- Path targetDirPath =
- new Path(getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(),
- table));
- if (outputFs.delete(targetDirPath, true)) {
- LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
- } else {
- LOG.info("No data has been found in " + targetDirPath.toString() + ".");
- }
-
- Path tableDir = targetDirPath.getParent();
- FileStatus[] backups = listStatus(outputFs, tableDir, null);
- if (backups == null || backups.length == 0) {
- outputFs.delete(tableDir, true);
- LOG.debug(tableDir.toString() + " is empty, removing it.");
- }
- }
- outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
- } catch (IOException e1) {
- LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
- + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
- }
- }
-
- /**
- * Given the backup root dir, backup id and the table name, return the backup image location,
- * which is also where the backup manifest file is. The return value looks like:
- * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
- * @param backupRootDir backup root directory
- * @param backupId backup id
- * @param tableName table name
- * @return backupPath String for the particular table
- */
- public static String
- getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
- return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
- + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
- + Path.SEPARATOR;
- }
-
- /**
- * Sort history list by start time in descending order.
- * @param historyList history list
- * @return sorted list of BackupCompleteData
- */
- public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
- ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
- TreeMap<Long, BackupInfo> map = new TreeMap<Long, BackupInfo>();
- for (BackupInfo h : historyList) {
- map.put(h.getStartTs(), h);
- }
- Iterator<Long> i = map.descendingKeySet().iterator();
- while (i.hasNext()) {
- list.add(map.get(i.next()));
- }
- return list;
- }
-
- /**
- * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates
- * differences between Hadoop versions: Hadoop 1 does not throw a FileNotFoundException and
- * returns an empty FileStatus[], while Hadoop 2 will throw a FileNotFoundException.
- * @param fs file system
- * @param dir directory
- * @param filter path filter
- * @return null if dir is empty or doesn't exist, otherwise FileStatus array
- */
- public static FileStatus[]
- listStatus(final FileSystem fs, final Path dir, final PathFilter filter) throws IOException {
- FileStatus[] status = null;
- try {
- status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
- } catch (FileNotFoundException fnfe) {
- // if directory doesn't exist, return null
- if (LOG.isTraceEnabled()) {
- LOG.trace(dir + " doesn't exist");
- }
- }
- if (status == null || status.length < 1) return null;
- return status;
- }
-
- /**
- * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the
- * 'path' component of a Path's URI: e.g. if a Path is
- * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
- * <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
- * out a Path without the qualifying FileSystem instance.
- * @param p file system Path whose 'path' component we are to return.
- * @return the 'path' component of the Path's URI
- */
- public static String getPath(Path p) {
- return p.toUri().getPath();
- }
-
- /**
- * Given the backup root dir and the backup id, return the log file location for an incremental
- * backup.
- * @param backupRootDir backup root directory
- * @param backupId backup id
- * @return logBackupDir: ".../user/biadmin/backup1/backup_1396650096738/WALs"
- */
- public static String getLogBackupDir(String backupRootDir, String backupId) {
- return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
- + HConstants.HREGION_LOGDIR_NAME;
- }
-
- private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
- throws IOException {
- // Get all (n) history from backup root destination
- FileSystem fs = FileSystem.get(conf);
- RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(backupRootPath);
-
- List<BackupInfo> infos = new ArrayList<BackupInfo>();
- while (it.hasNext()) {
- LocatedFileStatus lfs = it.next();
- if (!lfs.isDirectory()) continue;
- String backupId = lfs.getPath().getName();
- try {
- BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs);
- infos.add(info);
- } catch (IOException e) {
- LOG.error("Can not load backup info from: " + lfs.getPath(), e);
- }
- }
- // Sort
- Collections.sort(infos, new Comparator<BackupInfo>() {
-
- @Override
- public int compare(BackupInfo o1, BackupInfo o2) {
- long ts1 = getTimestamp(o1.getBackupId());
- long ts2 = getTimestamp(o2.getBackupId());
- if (ts1 == ts2) return 0;
- return ts1 < ts2 ? 1 : -1;
- }
-
- private long getTimestamp(String backupId) {
- String[] split = backupId.split("_");
- return Long.parseLong(split[1]);
- }
- });
- return infos;
- }
-
- public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
- BackupInfo.Filter... filters) throws IOException {
- List<BackupInfo> infos = getHistory(conf, backupRootPath);
- List<BackupInfo> ret = new ArrayList<BackupInfo>();
- for (BackupInfo info : infos) {
- if (ret.size() == n) {
- break;
- }
- boolean passed = true;
- for (int i = 0; i < filters.length; i++) {
- if (!filters[i].apply(info)) {
- passed = false;
- break;
- }
- }
- if (passed) {
- ret.add(info);
- }
- }
- return ret;
- }
-
- public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
- throws IOException {
- Path backupPath = new Path(backupRootPath, backupId);
-
- RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
- while (it.hasNext()) {
- LocatedFileStatus lfs = it.next();
- if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) {
- // Load BackupManifest
- BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent());
- BackupInfo info = manifest.toBackupInfo();
- return info;
- }
- }
- return null;
- }
-
- /**
- * Create restore request.
- * @param backupRootDir backup root dir
- * @param backupId backup id
- * @param check check only
- * @param fromTables list of tables to restore from
- * @param toTables list of tables to restore to
- * @param isOverwrite overwrite existing data
- * @return restore request object
- */
- public static RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
- boolean check, TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
- RestoreRequest.Builder builder = new RestoreRequest.Builder();
- RestoreRequest request =
- builder.withBackupRootDir(backupRootDir).withBackupId(backupId).withCheck(check)
- .withFromTables(fromTables).withToTables(toTables).withOvewrite(isOverwrite).build();
- return request;
- }
-
- public static boolean validate(HashMap<TableName, BackupManifest> backupManifestMap,
- Configuration conf) throws IOException {
- boolean isValid = true;
-
- for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) {
- TableName table = manifestEntry.getKey();
- TreeSet<BackupImage> imageSet = new TreeSet<BackupImage>();
-
- ArrayList<BackupImage> depList = manifestEntry.getValue().getDependentListByTable(table);
- if (depList != null && !depList.isEmpty()) {
- imageSet.addAll(depList);
- }
-
- LOG.info("Dependent image(s) from old to new:");
- for (BackupImage image : imageSet) {
- String imageDir =
- HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
- if (!BackupUtils.checkPathExist(imageDir, conf)) {
- LOG.error("ERROR: backup image does not exist: " + imageDir);
- isValid = false;
- break;
- }
- LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available");
- }
- }
- return isValid;
- }
-
- public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
- throws IOException {
- FileSystem fs = FileSystem.get(conf);
- String tmp =
- conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
- Path path =
- new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
- + EnvironmentEdgeManager.currentTime());
- if (deleteOnExit) {
- fs.deleteOnExit(path);
- }
- return path;
- }
-
- public static Path getBulkOutputDir(String tableName, Configuration conf) throws IOException {
- return getBulkOutputDir(tableName, conf, true);
- }
-
- public static String getFileNameCompatibleString(TableName table) {
- return table.getNamespaceAsString() + "-" + table.getQualifierAsString();
- }
-
- public static boolean failed(int result) {
- return result != 0;
- }
-
- public static boolean succeeded(int result) {
- return result == 0;
- }
-
- public static LoadIncrementalHFiles createLoader(Configuration config) throws IOException {
- // set configuration for restore:
- // LoadIncrementalHFiles needs more time, so raise the RPC timeout, e.g.
- // <name>hbase.rpc.timeout</name> <value>600000</value>
- Configuration conf = new Configuration(config);
- conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, MILLISEC_IN_HOUR);
-
- // By default, it is 32 and loader will fail if # of files in any region exceed this
- // limit. Bad for snapshot restore.
- conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
- conf.set(LoadIncrementalHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
- LoadIncrementalHFiles loader = null;
- try {
- loader = new LoadIncrementalHFiles(conf);
- } catch (Exception e) {
- throw new IOException(e);
- }
- return loader;
- }
-}
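
For context on what this file provided: getRSLogTimestampMins reduces a per-table map of region
server WAL timestamps down to the minimum timestamp per region server, which the backup system
uses as the WAL retention cut-off. A minimal standalone sketch of that reduction, with an
illustrative class name and sample values (not taken from the removed file):

    import java.util.HashMap;
    import java.util.Map;

    public class RsLogTimestampMinsSketch {
      public static void main(String[] args) {
        // table -> (region server -> last WAL roll timestamp)
        Map<String, Map<String, Long>> byTable = new HashMap<>();
        byTable.put("t1", Map.of("rs1:16020", 100L, "rs2:16020", 250L));
        byTable.put("t2", Map.of("rs1:16020", 80L, "rs2:16020", 300L));

        // For each region server, keep the smallest timestamp seen across all tables.
        Map<String, Long> mins = new HashMap<>();
        for (Map<String, Long> rsMap : byTable.values()) {
          rsMap.forEach((rs, ts) -> mins.merge(rs, ts, Math::min));
        }
        System.out.println(mins); // e.g. {rs1:16020=80, rs2:16020=250} (iteration order may vary)
      }
    }
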
http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
deleted file mode 100644
index b00351b..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ /dev/null
@@ -1,518 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.util;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreJob;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-
-/**
- * A collection for methods used by multiple classes to restore HBase tables.
- */
-@InterfaceAudience.Private
-public class RestoreTool {
-
- public static final Log LOG = LogFactory.getLog(RestoreTool.class);
- private final static long TABLE_AVAILABILITY_WAIT_TIME = 180000;
-
- private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR };
- protected Configuration conf = null;
- protected Path backupRootPath;
- protected String backupId;
- protected FileSystem fs;
-
- // store table name and snapshot dir mapping
- private final HashMap<TableName, Path> snapshotMap = new HashMap<>();
-
- public RestoreTool(Configuration conf, final Path backupRootPath, final String backupId)
- throws IOException {
- this.conf = conf;
- this.backupRootPath = backupRootPath;
- this.backupId = backupId;
- this.fs = backupRootPath.getFileSystem(conf);
- }
-
- /**
- * Returns a path of the form:
- * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
- * @param tableName table name
- * @return path to table archive
- * @throws IOException exception
- */
- Path getTableArchivePath(TableName tableName) throws IOException {
-
- Path baseDir =
- new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
- HConstants.HFILE_ARCHIVE_DIRECTORY);
- Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
- Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
- Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString());
- if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) {
- LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists");
- tableArchivePath = null; // empty table has no archive
- }
- return tableArchivePath;
- }
-
- /**
- * Gets region list
- * @param tableName table name
- * @return RegionList region list
- * @throws FileNotFoundException exception
- * @throws IOException exception
- */
- ArrayList<Path> getRegionList(TableName tableName) throws FileNotFoundException, IOException {
- Path tableArchivePath = getTableArchivePath(tableName);
- ArrayList<Path> regionDirList = new ArrayList<Path>();
- FileStatus[] children = fs.listStatus(tableArchivePath);
- for (FileStatus childStatus : children) {
- // each child here refers to a region directory
- Path child = childStatus.getPath();
- regionDirList.add(child);
- }
- return regionDirList;
- }
-
-
- void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException {
-
- try (Admin admin = conn.getAdmin();) {
- admin.modifyTable(desc);
- int attempt = 0;
- int maxAttempts = 600;
- while (!admin.isTableAvailable(desc.getTableName())) {
- Thread.sleep(100);
- attempt++;
- if (attempt > maxAttempts) {
- throw new IOException("Timeout of " + (maxAttempts * 100) + "ms expired");
- }
- }
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
-
- /**
- * During an incremental backup operation, call WalPlayer to replay the WALs in the backup image.
- * Currently tableNames and newTableNames contain only a single table; this will be expanded to
- * multiple tables in the future.
- * @param conn HBase connection
- * @param tableBackupPath backup path
- * @param logDirs incremental backup folders, which contain the WALs
- * @param tableNames source table names (the tables that were backed up)
- * @param newTableNames target table names (the tables to restore to)
- * @param incrBackupId incremental backup Id
- * @throws IOException exception
- */
- public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
- TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
-
- try (Admin admin = conn.getAdmin();) {
- if (tableNames.length != newTableNames.length) {
- throw new IOException("Number of source tables and target tables does not match!");
- }
- FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
-
- // For an incremental backup image, the tables are expected to exist already, created either
- // by the user or by a previous full backup. Here, check that all new tables exist.
- for (TableName tableName : newTableNames) {
- if (!admin.tableExists(tableName)) {
- throw new IOException("HBase table " + tableName
- + " does not exist. Create the table first, e.g. by restoring a full backup.");
- }
- }
- // adjust table schema
- for (int i = 0; i < tableNames.length; i++) {
- TableName tableName = tableNames[i];
- TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
- LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
-
- TableName newTableName = newTableNames[i];
- TableDescriptor newTableDescriptor = admin.getDescriptor(newTableName);
- List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
- List<ColumnFamilyDescriptor> existingFamilies =
- Arrays.asList(newTableDescriptor.getColumnFamilies());
- TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
- boolean schemaChangeNeeded = false;
- for (ColumnFamilyDescriptor family : families) {
- if (!existingFamilies.contains(family)) {
- builder.addColumnFamily(family);
- schemaChangeNeeded = true;
- }
- }
- for (ColumnFamilyDescriptor family : existingFamilies) {
- if (!families.contains(family)) {
- builder.removeColumnFamily(family.getName());
- schemaChangeNeeded = true;
- }
- }
- if (schemaChangeNeeded) {
- modifyTableSync(conn, builder.build());
- LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
- }
- }
- RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
-
- restoreService.run(logDirs, tableNames, newTableNames, false);
- }
- }
-
- public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName,
- TableName newTableName, boolean truncateIfExists, String lastIncrBackupId)
- throws IOException {
- createAndRestoreTable(conn, tableName, newTableName, tableBackupPath, truncateIfExists,
- lastIncrBackupId);
- }
-
- /**
- * Returns the path to the backup table snapshot directory:
- * "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot"
- * @param backupRootPath backup root path
- * @param tableName table name
- * @param backupId backup Id
- * @return path for snapshot
- */
- Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backupId) {
- return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
- HConstants.SNAPSHOT_DIR_NAME);
- }
-
- /**
- * Returns a path of the form:
- * "/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/snapshot_1396650097621_namespace_table".
- * This path contains .snapshotinfo and .tabledesc (0.96 and 0.98), or .snapshotinfo and
- * data.manifest (trunk).
- * @param tableName table name
- * @return path to table info
- * @throws FileNotFoundException exception
- * @throws IOException exception
- */
- Path getTableInfoPath(TableName tableName) throws FileNotFoundException, IOException {
- Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
- Path tableInfoPath = null;
-
- // can't build the path directly as the timestamp values are different
- FileStatus[] snapshots = fs.listStatus(tableSnapShotPath);
- for (FileStatus snapshot : snapshots) {
- tableInfoPath = snapshot.getPath();
- // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest";
- if (tableInfoPath.getName().endsWith("data.manifest")) {
- break;
- }
- }
- return tableInfoPath;
- }
-
- /**
- * Get table descriptor
- * @param tableName is the table backed up
- * @return {@link TableDescriptor} saved in backup image of the table
- */
- TableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
- Path tableInfoPath = this.getTableInfoPath(tableName);
- SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
- SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
- TableDescriptor tableDescriptor = manifest.getTableDescriptor();
- if (!tableDescriptor.getTableName().equals(tableName)) {
- LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
- + tableInfoPath.toString());
- LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
- throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
- + " under tableInfoPath: " + tableInfoPath.toString());
- }
- return tableDescriptor;
- }
-
- private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
- String lastIncrBackupId) throws IOException {
- if (lastIncrBackupId != null) {
- String target =
- BackupUtils.getTableBackupDir(backupRootPath.toString(),
- lastIncrBackupId, tableName);
- return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target));
- }
- return null;
- }
-
- private void createAndRestoreTable(Connection conn, TableName tableName, TableName newTableName,
- Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
- if (newTableName == null) {
- newTableName = tableName;
- }
- FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
-
- // get table descriptor first
- TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
- if (tableDescriptor != null) {
- LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
- }
-
- if (tableDescriptor == null) {
- Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
- if (fileSys.exists(tableSnapshotPath)) {
- // If the snapshot path exists, the backup path is in HDFS;
- // check whether a snapshot dir was already recorded for the target table
- if (snapshotMap.get(tableName) != null) {
- SnapshotDescription desc =
- SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
- SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
- tableDescriptor = manifest.getTableDescriptor();
- } else {
- tableDescriptor = getTableDesc(tableName);
- snapshotMap.put(tableName, getTableInfoPath(tableName));
- }
- if (tableDescriptor == null) {
- LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
- }
- } else {
- throw new IOException("Table snapshot directory: " +
- tableSnapshotPath + " does not exist.");
- }
- }
-
- Path tableArchivePath = getTableArchivePath(tableName);
- if (tableArchivePath == null) {
- if (tableDescriptor != null) {
- // A table descriptor without an archive dir means the table is empty; create the table and exit
- if (LOG.isDebugEnabled()) {
- LOG.debug("Found table descriptor but no archive dir for table " + tableName
- + ", will only create table");
- }
- tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
- checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
- truncateIfExists);
- return;
- } else {
- throw new IllegalStateException("Cannot restore hbase table because directory '"
- + " tableArchivePath is null.");
- }
- }
-
- if (tableDescriptor == null) {
- tableDescriptor = TableDescriptorBuilder.newBuilder(newTableName).build();
- } else {
- tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
- }
-
- // record all region dirs:
- // load all files in dir
- try {
- ArrayList<Path> regionPathList = getRegionList(tableName);
-
- // should only try to create the table with all region information, so we can pre-split
- // the regions at a fine grain
- checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList,
- tableDescriptor, truncateIfExists);
- RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
- Path[] paths = new Path[regionPathList.size()];
- regionPathList.toArray(paths);
- restoreService.run(paths, new TableName[]{tableName}, new TableName[] {newTableName}, true);
-
- } catch (Exception e) {
- LOG.error(e);
- throw new IllegalStateException("Cannot restore hbase table", e);
- }
- }
-
- /**
- * Gets region list
- * @param tableArchivePath table archive path
- * @return RegionList region list
- * @throws FileNotFoundException exception
- * @throws IOException exception
- */
- ArrayList<Path> getRegionList(Path tableArchivePath) throws FileNotFoundException, IOException {
- ArrayList<Path> regionDirList = new ArrayList<Path>();
- FileStatus[] children = fs.listStatus(tableArchivePath);
- for (FileStatus childStatus : children) {
- // each child here refers to a region directory
- Path child = childStatus.getPath();
- regionDirList.add(child);
- }
- return regionDirList;
- }
-
- /**
- * Calculate region boundary keys from the HFiles under each region directory
- * @param regionDirList region dir list
- * @return the inferred region boundary keys
- */
- byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList) throws FileNotFoundException,
- IOException {
- TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
- // Build a set of keys to store the boundaries
- // calculate region boundaries and add all the column families to the table descriptor
- for (Path regionDir : regionDirList) {
- LOG.debug("Parsing region dir: " + regionDir);
- Path hfofDir = regionDir;
-
- if (!fs.exists(hfofDir)) {
- LOG.warn("HFileOutputFormat dir " + hfofDir + " not found");
- }
-
- FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
- if (familyDirStatuses == null) {
- throw new IOException("No families found in " + hfofDir);
- }
-
- for (FileStatus stat : familyDirStatuses) {
- if (!stat.isDirectory()) {
- LOG.warn("Skipping non-directory " + stat.getPath());
- continue;
- }
- boolean isIgnore = false;
- String pathName = stat.getPath().getName();
- for (String ignore : ignoreDirs) {
- if (pathName.contains(ignore)) {
- LOG.warn("Skipping non-family directory" + pathName);
- isIgnore = true;
- break;
- }
- }
- if (isIgnore) {
- continue;
- }
- Path familyDir = stat.getPath();
- LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]");
- // Skip _logs, etc
- if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) {
- continue;
- }
-
- // start to parse hfile inside one family dir
- Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
- for (Path hfile : hfiles) {
- if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
- || StoreFileInfo.isReference(hfile.getName())
- || HFileLink.isHFileLink(hfile.getName())) {
- continue;
- }
- HFile.Reader reader = HFile.createReader(fs, hfile, conf);
- final byte[] first, last;
- try {
- reader.loadFileInfo();
- first = reader.getFirstRowKey().get();
- last = reader.getLastRowKey().get();
- LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first="
- + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
-
- // To eventually infer start key-end key boundaries
- map.merge(first, 1, Integer::sum);
- map.merge(last, -1, Integer::sum);
- } finally {
- reader.close();
- }
- }
- }
- }
- return LoadIncrementalHFiles.inferBoundaries(map);
- }
-
- /**
- * Prepare the table for bulk load; most of the code is copied from
- * {@link LoadIncrementalHFiles#createTable(TableName, String, Admin)}
- * @param conn connection
- * @param tableBackupPath path
- * @param tableName table name
- * @param targetTableName target table name
- * @param regionDirList region directory list
- * @param htd table descriptor
- * @param truncateIfExists truncates table if exists
- * @throws IOException exception
- */
- private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
- TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
- boolean truncateIfExists) throws IOException {
- try (Admin admin = conn.getAdmin();) {
- boolean createNew = false;
- if (admin.tableExists(targetTableName)) {
- if (truncateIfExists) {
- LOG.info("Truncating exising target table '" + targetTableName
- + "', preserving region splits");
- admin.disableTable(targetTableName);
- admin.truncateTable(targetTableName, true);
- } else {
- LOG.info("Using exising target table '" + targetTableName + "'");
- }
- } else {
- createNew = true;
- }
- if (createNew) {
- LOG.info("Creating target table '" + targetTableName + "'");
- byte[][] keys = null;
- if (regionDirList == null || regionDirList.size() == 0) {
- admin.createTable(htd, null);
- } else {
- keys = generateBoundaryKeys(regionDirList);
- // create table using table descriptor and region boundaries
- admin.createTable(htd, keys);
- }
-
- }
- long startTime = EnvironmentEdgeManager.currentTime();
- while (!admin.isTableAvailable(targetTableName)) {
- try {
- Thread.sleep(100);
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- }
- if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) {
- throw new IOException("Time out " + TABLE_AVAILABILITY_WAIT_TIME + "ms expired, table "
- + targetTableName + " is still not available");
- }
- }
- }
- }
-
-}
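
For context on the restore side: generateBoundaryKeys infers region split points from the HFiles
in a backup image by counting +1 at each file's first row key and -1 at each file's last row key
in a sorted map, then handing the map to LoadIncrementalHFiles.inferBoundaries, which emits a
split point wherever the running count returns to zero and a new key range opens. A simplified,
self-contained sketch of that counting scheme (String keys instead of byte[]; the class name and
sample ranges are illustrative, and the real inferBoundaries may differ in detail):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    public class BoundaryInferenceSketch {
      public static void main(String[] args) {
        // Each pair is one HFile's (first row key, last row key).
        String[][] hfileRanges = { { "a", "c" }, { "b", "c" }, { "d", "f" } };

        // +1 where a file's key range opens, -1 where it closes.
        TreeMap<String, Integer> counts = new TreeMap<>();
        for (String[] range : hfileRanges) {
          counts.merge(range[0], 1, Integer::sum);
          counts.merge(range[1], -1, Integer::sum);
        }

        // Walk keys in order; when the running count returns to zero, one
        // contiguous blob of overlapping files ends, and the key that opens
        // the next blob becomes a region split point.
        List<String> splits = new ArrayList<>();
        int running = 0;
        boolean firstBlob = true;
        String blobStart = null;
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
          if (running == 0) {
            blobStart = e.getKey();
          }
          running += e.getValue();
          if (running == 0) {
            if (!firstBlob) {
              splits.add(blobStart);
            }
            firstBlob = false;
          }
        }
        System.out.println(splits); // [d]: split between the a-c files and the d-f file
      }
    }
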
http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
deleted file mode 100644
index c44efbd..0000000
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
-import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager;
-import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
-import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.SecureTestUtil;
-import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.AfterClass;
-import org.junit.Before;
-
-/**
- * This class is only a base for other integration-level backup tests. Do not add tests here.
- * TestBackupSmallTests is where tests that don't require bringing machines up/down should go.
- * All other tests should have their own classes and extend this one.
- */
-public class TestBackupBase {
-
- private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
-
- protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- protected static HBaseTestingUtility TEST_UTIL2;
- protected static Configuration conf1 = TEST_UTIL.getConfiguration();
- protected static Configuration conf2;
-
- protected static TableName table1 = TableName.valueOf("table1");
- protected static HTableDescriptor table1Desc;
- protected static TableName table2 = TableName.valueOf("table2");
- protected static TableName table3 = TableName.valueOf("table3");
- protected static TableName table4 = TableName.valueOf("table4");
-
- protected static TableName table1_restore = TableName.valueOf("ns1:table1_restore");
- protected static TableName table2_restore = TableName.valueOf("ns2:table2_restore");
- protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
- protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
-
- protected static final int NB_ROWS_IN_BATCH = 99;
- protected static final byte[] qualName = Bytes.toBytes("q1");
- protected static final byte[] famName = Bytes.toBytes("f");
-
- protected static String BACKUP_ROOT_DIR = Path.SEPARATOR +"backupUT";
- protected static String BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
- protected static String provider = "defaultProvider";
- protected static boolean secure = false;
-
- protected static boolean autoRestoreOnFailure = true;
- protected static boolean setupIsDone = false;
- protected static boolean useSecondCluster = false;
-
-
- static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
-
- public IncrementalTableBackupClientForTest() {
- }
-
- public IncrementalTableBackupClientForTest(Connection conn,
- String backupId, BackupRequest request) throws IOException {
- super(conn, backupId, request);
- }
-
- @Override
- public void execute() throws IOException {
- // case INCREMENTAL_COPY:
- try {
- // case PREPARE_INCREMENTAL:
- failStageIf(Stage.stage_0);
- beginBackup(backupManager, backupInfo);
-
- failStageIf(Stage.stage_1);
- backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
- LOG.debug("For incremental backup, current table set is "
- + backupManager.getIncrementalBackupTableSet());
- newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
- // copy out the table and region info files for each table
- BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
- // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
- convertWALsToHFiles();
- incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
- backupInfo.getBackupRootDir());
- failStageIf(Stage.stage_2);
- // Save list of WAL files copied
- backupManager.recordWALFiles(backupInfo.getIncrBackupFileList());
-
- // case INCR_BACKUP_COMPLETE:
- // set overall backup status: complete. Here we make sure to complete the backup.
- // After this checkpoint, even if the cancel process starts, the backup will be finished.
- // Set the previousTimestampMap which is before this current log roll to the manifest.
- HashMap<TableName, HashMap<String, Long>> previousTimestampMap =
- backupManager.readLogTimestampMap();
- backupInfo.setIncrTimestampMap(previousTimestampMap);
-
- // The table list in backupInfo is good for both full backup and incremental backup.
- // For incremental backup, it contains the incremental backup table set.
- backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
- failStageIf(Stage.stage_3);
-
- HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
- backupManager.readLogTimestampMap();
-
- Long newStartCode =
- BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
- backupManager.writeBackupStartCode(newStartCode);
-
- handleBulkLoad(backupInfo.getTableNames());
- failStageIf(Stage.stage_4);
-
- // backup complete
- completeBackup(conn, backupInfo, backupManager, BackupType.INCREMENTAL, conf);
-
- } catch (Exception e) {
- failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
- BackupType.INCREMENTAL, conf);
- throw new IOException(e);
- }
-
- }
- }
-
- static class FullTableBackupClientForTest extends FullTableBackupClient {
-
- public FullTableBackupClientForTest() {
- }
-
- public FullTableBackupClientForTest(Connection conn, String backupId, BackupRequest request)
- throws IOException {
- super(conn, backupId, request);
- }
-
- @Override
- public void execute() throws IOException {
- // Get the stage ID to fail on
- try (Admin admin = conn.getAdmin();) {
- // Begin BACKUP
- beginBackup(backupManager, backupInfo);
- failStageIf(Stage.stage_0);
- String savedStartCode = null;
- boolean firstBackup = false;
- // do snapshot for full table backup
- savedStartCode = backupManager.readBackupStartCode();
- firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
- if (firstBackup) {
- // This is our first backup. Let's put a marker in the system table so that we can
- // hold the logs while we do the backup.
- backupManager.writeBackupStartCode(0L);
- }
- failStageIf(Stage.stage_1);
- // We roll log here before we do the snapshot. It is possible there is duplicate data
- // in the log that is already in the snapshot. But if we do it after the snapshot, we
- // could have data loss.
- // A better approach is to do the roll log on each RS in the same global procedure as
- // the snapshot.
- LOG.info("Execute roll log procedure for full backup ...");
-
- Map<String, String> props = new HashMap<String, String>();
- props.put("backupRoot", backupInfo.getBackupRootDir());
- admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
- LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
- failStageIf(Stage.stage_2);
- newTimestamps = backupManager.readRegionServerLastLogRollResult();
- if (firstBackup) {
- // Update the registered log files. We record ALL old WAL files as
- // registered, because this is the first full backup in the system and
- // these files are not needed for the next incremental backup.
- List<String> logFiles = BackupUtils.getWALFilesOlderThan(conf, newTimestamps);
- backupManager.recordWALFiles(logFiles);
- }
-
- // SNAPSHOT_TABLES:
- backupInfo.setPhase(BackupPhase.SNAPSHOT);
- for (TableName tableName : tableList) {
- String snapshotName =
- "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
- + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
-
- snapshotTable(admin, tableName, snapshotName);
- backupInfo.setSnapshotName(tableName, snapshotName);
- }
- failStageIf(Stage.stage_3);
- // SNAPSHOT_COPY:
- // do snapshot copy
- LOG.debug("snapshot copy for " + backupId);
- snapshotCopy(backupInfo);
- // Updates incremental backup table set
- backupManager.addIncrementalBackupTableSet(backupInfo.getTables());
-
- // BACKUP_COMPLETE:
- // Set the overall backup status to complete. After this checkpoint, the backup
- // is allowed to finish even if a cancel request arrives.
- backupInfo.setState(BackupState.COMPLETE);
- // The table list in backupInfo covers both full and incremental backups.
- // For an incremental backup, it contains the incremental backup table set.
- backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
-
- HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
- backupManager.readLogTimestampMap();
-
- Long newStartCode =
- BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
- backupManager.writeBackupStartCode(newStartCode);
- failStageIf(Stage.stage_4);
- // backup complete
- completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf);
-
- } catch (Exception e) {
- if (autoRestoreOnFailure) {
- failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
- BackupType.FULL, conf);
- }
- throw new IOException(e);
- }
- }
-
- }
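
A failure-injection test would substitute this client for the default one and pick a stage to fail. A hedged sketch of such a test body, inside a JUnit test of a TestBackupBase subclass; both configuration keys below are assumptions, not taken from this diff:

  // Hedged sketch: wire in the ForTest client and simulate a failure at stage_2.
  // Both property keys here are illustrative assumptions.
  conf1.set("hbase.backup.client.impl", FullTableBackupClientForTest.class.getName());
  conf1.setInt("backup.test.mode.stage", 2);

  try {
    fullTableBackup(Lists.newArrayList(table1));
    fail("Expected the simulated stage_2 failure to surface as an IOException");
  } catch (IOException expected) {
    // execute() rethrows the injected failure after failBackup() has run
  }
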
-
-
- /**
- * @throws java.lang.Exception
- */
- @Before
- public void setUp() throws Exception {
- if (setupIsDone) {
- return;
- }
- if (secure) {
- // set the always on security provider
- UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
- HadoopSecurityEnabledUserProviderForTesting.class);
- // setup configuration
- SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
- }
- String coproc = conf1.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
- conf1.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, (coproc == null ? "" : coproc + ",") +
- BackupObserver.class.getName());
- conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
- BackupManager.decorateMasterConfiguration(conf1);
- BackupManager.decorateRegionServerConfiguration(conf1);
- conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
- // Set MultiWAL (with 2 default WAL files per RS)
- conf1.set(WALFactory.WAL_PROVIDER, provider);
- TEST_UTIL.startMiniCluster();
-
- if (useSecondCluster) {
- conf2 = HBaseConfiguration.create(conf1);
- conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
- TEST_UTIL2 = new HBaseTestingUtility(conf2);
- TEST_UTIL2.setZkCluster(TEST_UTIL.getZkCluster());
- TEST_UTIL2.startMiniCluster();
- }
- conf1 = TEST_UTIL.getConfiguration();
-
- TEST_UTIL.startMiniMapReduceCluster();
- BACKUP_ROOT_DIR =
- new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")),
- BACKUP_ROOT_DIR).toString();
- LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
- if (useSecondCluster) {
- BACKUP_REMOTE_ROOT_DIR =
- new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")),
- BACKUP_REMOTE_ROOT_DIR).toString();
- LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
- }
- createTables();
- populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
- setupIsDone = true;
- }
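
Subclasses drive the branches in setUp() through static flags of this class. A hedged sketch of a concrete suite, assuming secure, useSecondCluster, and provider are subclass-visible statics (as their use above suggests):

  // Hedged sketch: flip the setUp() knobs before the one-time initialization
  // guarded by setupIsDone runs. The class name is illustrative.
  public class TestBackupSecureSuite extends TestBackupBase {
    @BeforeClass
    public static void configureSuite() {
      secure = true;            // use the testing security provider path
      useSecondCluster = true;  // also start the second (remote) mini cluster
      provider = "multiwal";    // matches the MultiWAL comment in setUp()
    }
  }
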
-
- private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
- Iterator<Entry<String, String>> it = masterConf.iterator();
- while (it.hasNext()) {
- Entry<String, String> e = it.next();
- conf.set(e.getKey(), e.getValue());
- }
- }
-
- /**
- * @throws java.lang.Exception
- */
- @AfterClass
- public static void tearDown() throws Exception {
- try {
- SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
- } catch (Exception e) {
- // Best-effort cleanup: ignore failures while deleting snapshots.
- }
- SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
- if (useSecondCluster) {
- TEST_UTIL2.shutdownMiniCluster();
- }
- TEST_UTIL.shutdownMiniCluster();
- TEST_UTIL.shutdownMiniMapReduceCluster();
- }
-
- HTable insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows)
- throws IOException {
- HTable t = (HTable) conn.getTable(table);
- Put p1;
- for (int i = 0; i < numRows; i++) {
- p1 = new Put(Bytes.toBytes("row-" + table + "-" + id + "-" + i));
- p1.addColumn(family, qualName, Bytes.toBytes("val" + i));
- t.put(p1);
- }
- return t;
- }
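
insertIntoTable() is the mutation half of the usual test flow: full backup, mutate, incremental backup. A hedged sketch of that flow using the helpers of this class (famName and NB_ROWS_IN_BATCH are fields of this class; the backup and check helpers are defined later in the file):

  // Hedged sketch of a full-then-incremental round trip built from the
  // helpers in this class.
  List<TableName> tables = Lists.newArrayList(table1);
  String fullId = fullTableBackup(tables);
  assertTrue(checkSucceeded(fullId));

  try (Connection conn = ConnectionFactory.createConnection(conf1)) {
    insertIntoTable(conn, table1, famName, 1, NB_ROWS_IN_BATCH).close();
  }

  String incrId = incrementalTableBackup(tables);
  assertTrue(checkSucceeded(incrId));
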
-
-
- protected BackupRequest createBackupRequest(BackupType type,
- List<TableName> tables, String path) {
- BackupRequest.Builder builder = new BackupRequest.Builder();
- BackupRequest request = builder.withBackupType(type)
- .withTableList(tables)
- .withTargetRootDir(path).build();
- return request;
- }
-
- protected String backupTables(BackupType type, List<TableName> tables, String path)
- throws IOException {
- Connection conn = null;
- BackupAdmin badmin = null;
- String backupId;
- try {
- conn = ConnectionFactory.createConnection(conf1);
- badmin = new BackupAdminImpl(conn);
- BackupRequest request = createBackupRequest(type, tables, path);
- backupId = badmin.backupTables(request);
- } finally {
- if (badmin != null) {
- badmin.close();
- }
- if (conn != null) {
- conn.close();
- }
- }
- return backupId;
- }
-
- protected String fullTableBackup(List<TableName> tables) throws IOException {
- return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
- }
-
- protected String incrementalTableBackup(List<TableName> tables) throws IOException {
- return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
- }
-
- protected static void loadTable(Table table) throws Exception {
-
- Put p; // load NB_ROWS_IN_BATCH rows into the table
- for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
- p = new Put(Bytes.toBytes("row" + i));
- p.setDurability(Durability.SKIP_WAL);
- p.addColumn(famName, qualName, Bytes.toBytes("val" + i));
- table.put(p);
- }
- }
-
- protected static void createTables() throws Exception {
-
- long tid = System.currentTimeMillis();
- table1 = TableName.valueOf("ns1:test-" + tid);
- HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
-
- // Create namespaces
- NamespaceDescriptor desc1 = NamespaceDescriptor.create("ns1").build();
- NamespaceDescriptor desc2 = NamespaceDescriptor.create("ns2").build();
- NamespaceDescriptor desc3 = NamespaceDescriptor.create("ns3").build();
- NamespaceDescriptor desc4 = NamespaceDescriptor.create("ns4").build();
-
- ha.createNamespace(desc1);
- ha.createNamespace(desc2);
- ha.createNamespace(desc3);
- ha.createNamespace(desc4);
-
- HTableDescriptor desc = new HTableDescriptor(table1);
- HColumnDescriptor fam = new HColumnDescriptor(famName);
- desc.addFamily(fam);
- ha.createTable(desc);
- table1Desc = desc;
- Connection conn = ConnectionFactory.createConnection(conf1);
- Table table = conn.getTable(table1);
- loadTable(table);
- table.close();
- table2 = TableName.valueOf("ns2:test-" + (tid + 1));
- desc = new HTableDescriptor(table2);
- desc.addFamily(fam);
- ha.createTable(desc);
- table = conn.getTable(table2);
- loadTable(table);
- table.close();
- table3 = TableName.valueOf("ns3:test-" + (tid + 2));
- table = TEST_UTIL.createTable(table3, famName);
- table.close();
- table4 = TableName.valueOf("ns4:test-" + (tid + 3));
- table = TEST_UTIL.createTable(table4, famName);
- table.close();
- ha.close();
- conn.close();
- }
-
- protected boolean checkSucceeded(String backupId) throws IOException {
- BackupInfo status = getBackupInfo(backupId);
- if (status == null) return false;
- return status.getState() == BackupState.COMPLETE;
- }
-
- protected boolean checkFailed(String backupId) throws IOException {
- BackupInfo status = getBackupInfo(backupId);
- if (status == null) return false;
- return status.getState() == BackupState.FAILED;
- }
-
- private BackupInfo getBackupInfo(String backupId) throws IOException {
- try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
- BackupInfo status = table.readBackupInfo(backupId);
- return status;
- }
- }
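
checkSucceeded() and checkFailed() both reduce to the readBackupInfo() lookup above. A small debugging variant along the same lines (a sketch, not part of the original file):

  // Hedged sketch: log a backup's state via the same BackupSystemTable lookup.
  protected void logBackupState(String backupId) throws IOException {
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
      BackupInfo info = table.readBackupInfo(backupId);
      LOG.info("Backup " + backupId + " state="
          + (info == null ? "<not found>" : info.getState()));
    }
  }
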
-
- protected BackupAdmin getBackupAdmin() throws IOException {
- return new BackupAdminImpl(TEST_UTIL.getConnection());
- }
-
- /**
- * Helper method: converts table name strings to a list of TableName.
- */
- protected List<TableName> toList(String... args) {
- List<TableName> ret = new ArrayList<>();
- for (String arg : args) {
- ret.add(TableName.valueOf(arg));
- }
- return ret;
- }
-
- protected void dumpBackupDir() throws IOException {
- // Recursively list every file under the backup root, for debugging
- FileSystem fs = FileSystem.get(conf1);
- RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
- while (it.hasNext()) {
- LOG.debug(it.next().getPath());
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
deleted file mode 100644
index 4670c49..0000000
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestBackupBoundaryTests extends TestBackupBase {
-
- private static final Log LOG = LogFactory.getLog(TestBackupBoundaryTests.class);
-
- /**
- * Verify that full backup is created on a single empty table correctly.
- * @throws Exception
- */
- @Test
- public void testFullBackupSingleEmpty() throws Exception {
-
- LOG.info("create full backup image on single table");
- List<TableName> tables = Lists.newArrayList(table3);
- LOG.info("Finished Backup " + fullTableBackup(tables));
- }
-
- /**
- * Verify that full backup is created on multiple empty tables correctly.
- * @throws Exception
- */
- @Test
- public void testFullBackupMultipleEmpty() throws Exception {
- LOG.info("create full backup image on mulitple empty tables");
-
- List<TableName> tables = Lists.newArrayList(table3, table4);
- fullTableBackup(tables);
- }
-
- /**
- * Verify that full backup fails on a single table that does not exist.
- * @throws Exception
- */
- @Test(expected = IOException.class)
- public void testFullBackupSingleDNE() throws Exception {
-
- LOG.info("test full backup fails on a single table that does not exist");
- List<TableName> tables = toList("tabledne");
- fullTableBackup(tables);
- }
-
- /**
- * Verify that full backup fails on multiple tables that do not exist.
- * @throws Exception
- */
- @Test(expected = IOException.class)
- public void testFullBackupMultipleDNE() throws Exception {
-
- LOG.info("test full backup fails on multiple tables that do not exist");
- List<TableName> tables = toList("table1dne", "table2dne");
- fullTableBackup(tables);
- }
-
- /**
- * Verify that full backup fails on tableset containing real and fake tables.
- * @throws Exception
- */
- @Test(expected = IOException.class)
- public void testFullBackupMixExistAndDNE() throws Exception {
- LOG.info("create full backup fails on tableset containing real and fake table");
-
- List<TableName> tables = toList(table1.getNameAsString(), "tabledne");
- fullTableBackup(tables);
- }
-}