Posted to commits@hbase.apache.org by te...@apache.org on 2015/12/27 19:02:18 UTC
[2/6] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (Vladimir Rodionov)
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index 9d9cee0..2ceeda5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -85,6 +85,9 @@ public class WALPlayer extends Configured implements Tool {
private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
+ public WALPlayer() {
+ }
+
protected WALPlayer(final Configuration c) {
super(c);
}
@@ -94,7 +97,7 @@ public class WALPlayer extends Configured implements Tool {
* This one can be used together with {@link KeyValueSortReducer}
*/
static class WALKeyValueMapper
- extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, KeyValue> {
+ extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, KeyValue> {
private byte[] table;
@Override
@@ -106,7 +109,9 @@ public class WALPlayer extends Configured implements Tool {
if (Bytes.equals(table, key.getTablename().getName())) {
for (Cell cell : value.getCells()) {
KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
- if (WALEdit.isMetaEditFamily(kv)) continue;
+ if (WALEdit.isMetaEditFamily(kv)) {
+ continue;
+ }
context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), kv);
}
}
@@ -132,7 +137,7 @@ public class WALPlayer extends Configured implements Tool {
* a running HBase instance.
*/
protected static class WALMapper
- extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
+ extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
private Map<TableName, TableName> tables = new TreeMap<TableName, TableName>();
@Override
@@ -149,7 +154,9 @@ public class WALPlayer extends Configured implements Tool {
Cell lastCell = null;
for (Cell cell : value.getCells()) {
// filtering WAL meta entries
- if (WALEdit.isMetaEditFamily(cell)) continue;
+ if (WALEdit.isMetaEditFamily(cell)) {
+ continue;
+ }
// Allow a subclass filter out this cell.
if (filter(context, cell)) {
@@ -160,8 +167,12 @@ public class WALPlayer extends Configured implements Tool {
if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte()
|| !CellUtil.matchingRow(lastCell, cell)) {
// row or type changed, write out aggregate KVs.
- if (put != null) context.write(tableOut, put);
- if (del != null) context.write(tableOut, del);
+ if (put != null) {
+ context.write(tableOut, put);
+ }
+ if (del != null) {
+ context.write(tableOut, del);
+ }
if (CellUtil.isDelete(cell)) {
del = new Delete(CellUtil.cloneRow(cell));
} else {
@@ -177,8 +188,12 @@ public class WALPlayer extends Configured implements Tool {
lastCell = cell;
}
// write residual KVs
- if (put != null) context.write(tableOut, put);
- if (del != null) context.write(tableOut, del);
+ if (put != null) {
+ context.write(tableOut, put);
+ }
+ if (del != null) {
+ context.write(tableOut, del);
+ }
}
} catch (InterruptedException e) {
e.printStackTrace();
@@ -186,7 +201,8 @@ public class WALPlayer extends Configured implements Tool {
}
/**
- * @param cell
+ * Filter a cell before it is emitted.
+ * @param cell the cell to check
* @return Return true if we are to emit this cell.
*/
protected boolean filter(Context context, final Cell cell) {
@@ -197,9 +213,7 @@ public class WALPlayer extends Configured implements Tool {
public void setup(Context context) throws IOException {
String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY);
String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY);
- if (tablesToUse == null && tableMap == null) {
- // Then user wants all tables.
- } else if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) {
+ if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) {
// this can only happen when WALMapper is used directly by a class other than WALPlayer
throw new IOException("No tables or incorrect table mapping specified.");
}
@@ -215,7 +229,9 @@ public class WALPlayer extends Configured implements Tool {
void setupTime(Configuration conf, String option) throws IOException {
String val = conf.get(option);
- if (null == val) return;
+ if (null == val) {
+ return;
+ }
long ms;
try {
// first try to parse in user friendly form
@@ -295,7 +311,8 @@ public class WALPlayer extends Configured implements Tool {
return job;
}
- /*
+ /**
+ * Print usage
* @param errorMsg Error message. Can be null.
*/
private void usage(final String errorMsg) {
@@ -305,7 +322,8 @@ public class WALPlayer extends Configured implements Tool {
System.err.println("Usage: " + NAME + " [options] <wal inputdir> <tables> [<tableMappings>]");
System.err.println("Read all WAL entries for <tables>.");
System.err.println("If no tables (\"\") are specific, all tables are imported.");
- System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported in that case.)");
+ System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported"+
+ " in that case.)");
System.err.println("Otherwise <tables> is a comma separated list of tables.\n");
System.err.println("The WAL entries can be mapped to new set of tables via <tableMapping>.");
System.err.println("<tableMapping> is a command separated list of targettables.");
@@ -318,10 +336,10 @@ public class WALPlayer extends Configured implements Tool {
System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
System.err.println(" -D " + JOB_NAME_CONF_KEY
- + "=jobName - use the specified mapreduce job name for the wal player");
+ + "=jobName - use the specified mapreduce job name for the wal player");
System.err.println("For performance also consider the following options:\n"
- + " -Dmapreduce.map.speculative=false\n"
- + " -Dmapreduce.reduce.speculative=false");
+ + " -Dmapreduce.map.speculative=false\n"
+ + " -Dmapreduce.reduce.speculative=false");
}
/**
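
The only functional change in this WALPlayer hunk is the new public no-arg constructor; the rest is checkstyle cleanup (braces around one-line ifs, javadoc, wrapping). A public no-arg constructor lets other code, such as the restore path elsewhere in this patch, instantiate the tool and drive it through ToolRunner. A minimal sketch of that usage (illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.WALPlayer;
    import org.apache.hadoop.util.ToolRunner;

    public class WALPlayerLauncher {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // args: <wal inputdir> <tables> [<tableMappings>]
        int exitCode = ToolRunner.run(conf, new WALPlayer(), args);
        System.exit(exitCode);
      }
    }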
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index bdb19f4..5cd38b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
+import org.apache.hadoop.hbase.backup.BackupManager;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
@@ -384,6 +385,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);
Replication.decorateMasterConfiguration(this.conf);
+ BackupManager.decorateMasterConfiguration(this.conf);
// Hack! Maps DFSClient => Master for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
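
BackupManager.decorateMasterConfiguration follows the same idea as Replication.decorateMasterConfiguration just above it: mutate the master's Configuration once at startup to register backup plugins (BackupManager itself is added elsewhere in this patch). A rough sketch of the decoration pattern; the method and parameter names here are illustrative assumptions, not the patch's actual values:

    import org.apache.hadoop.conf.Configuration;

    public final class DecorationPatternSketch {
      private DecorationPatternSketch() {
      }

      // Append a plugin class to a comma-separated configuration key,
      // preserving anything the operator configured before startup.
      public static void appendToKey(Configuration conf, String key, String className) {
        String current = conf.get(key);
        conf.set(key, (current == null || current.isEmpty())
            ? className : current + "," + className);
      }
    }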
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
index 95c3ffe..b6e11ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
@@ -37,7 +37,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager {
* @param rss Region Server service interface
* @throws IOException if initialization fails
*/
- public abstract void initialize(RegionServerServices rss) throws KeeperException;
+ public abstract void initialize(RegionServerServices rss) throws IOException;
/**
* Start accepting procedure requests.
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
index 0f4ea64..adb3604 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
-import org.apache.zookeeper.KeeperException;
/**
* Provides the globally barriered procedure framework and environment
@@ -39,7 +38,7 @@ public class RegionServerProcedureManagerHost extends
private static final Log LOG = LogFactory
.getLog(RegionServerProcedureManagerHost.class);
- public void initialize(RegionServerServices rss) throws KeeperException {
+ public void initialize(RegionServerServices rss) throws IOException {
for (RegionServerProcedureManager proc : procedures) {
LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing");
proc.initialize(rss);
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
index 085d642..3865ba9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
@@ -54,7 +54,7 @@ public class ZKProcedureCoordinatorRpcs implements ProcedureCoordinatorRpcs {
* @throws IOException if an unexpected ZooKeeper error occurs
*/
public ZKProcedureCoordinatorRpcs(ZooKeeperWatcher watcher,
- String procedureClass, String coordName) throws KeeperException {
+ String procedureClass, String coordName) throws IOException {
this.watcher = watcher;
this.procedureType = procedureClass;
this.coordName = coordName;
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
index 2e03a60..fff75a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
@@ -68,49 +68,54 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
* @throws IOException if we can't reach ZooKeeper
*/
public ZKProcedureMemberRpcs(final ZooKeeperWatcher watcher, final String procType)
- throws KeeperException {
- this.zkController = new ZKProcedureUtil(watcher, procType) {
- @Override
- public void nodeCreated(String path) {
- if (!isInProcedurePath(path)) {
- return;
- }
+ throws IOException {
+ try {
+ this.zkController = new ZKProcedureUtil(watcher, procType) {
+ @Override
+ public void nodeCreated(String path) {
+ if (!isInProcedurePath(path)) {
+ return;
+ }
- LOG.info("Received created event:" + path);
- // if it is a simple start/end/abort then we just rewatch the node
- if (isAcquiredNode(path)) {
- waitForNewProcedures();
- return;
- } else if (isAbortNode(path)) {
- watchForAbortedProcedures();
- return;
+ LOG.info("Received created event:" + path);
+ // if it is a simple start/end/abort then we just rewatch the node
+ if (isAcquiredNode(path)) {
+ waitForNewProcedures();
+ return;
+ } else if (isAbortNode(path)) {
+ watchForAbortedProcedures();
+ return;
+ }
+ String parent = ZKUtil.getParent(path);
+ // if its the end barrier, the procedure can be completed
+ if (isReachedNode(parent)) {
+ receivedReachedGlobalBarrier(path);
+ return;
+ } else if (isAbortNode(parent)) {
+ abort(path);
+ return;
+ } else if (isAcquiredNode(parent)) {
+ startNewSubprocedure(path);
+ } else {
+ LOG.debug("Ignoring created notification for node:" + path);
+ }
}
- String parent = ZKUtil.getParent(path);
- // if its the end barrier, the procedure can be completed
- if (isReachedNode(parent)) {
- receivedReachedGlobalBarrier(path);
- return;
- } else if (isAbortNode(parent)) {
- abort(path);
- return;
- } else if (isAcquiredNode(parent)) {
- startNewSubprocedure(path);
- } else {
- LOG.debug("Ignoring created notification for node:" + path);
- }
- }
- @Override
- public void nodeChildrenChanged(String path) {
- if (path.equals(this.acquiredZnode)) {
- LOG.info("Received procedure start children changed event: " + path);
- waitForNewProcedures();
- } else if (path.equals(this.abortZnode)) {
- LOG.info("Received procedure abort children changed event: " + path);
- watchForAbortedProcedures();
+ @Override
+ public void nodeChildrenChanged(String path) {
+ if (path.equals(this.acquiredZnode)) {
+ LOG.info("Received procedure start children changed event: " + path);
+ waitForNewProcedures();
+ } else if (path.equals(this.abortZnode)) {
+ LOG.info("Received procedure abort children changed event: " + path);
+ watchForAbortedProcedures();
+ }
}
- }
- };
+ };
+ } catch (KeeperException e) {
+ // Surface ZooKeeper failures as IOException under the new contract
+ throw new IOException(e);
+ }
}
public ZKProcedureUtil getZkController() {
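
The constructor rewrite above is mostly re-indentation: the anonymous ZKProcedureUtil is unchanged, but its construction now sits inside a try block so that KeeperException can be rethrown as IOException, matching the initialize(...) signature changes earlier in this commit. The translation pattern, reduced to a standalone sketch (names here are mine, not the patch's):

    import java.io.IOException;
    import org.apache.zookeeper.KeeperException;

    public class ZkExceptionTranslation {
      interface ZkAction<T> {
        T run() throws KeeperException;
      }

      // Run a ZooKeeper-dependent action, surfacing failures as IOException
      // so callers no longer depend on org.apache.zookeeper types.
      static <T> T io(ZkAction<T> action) throws IOException {
        try {
          return action.run();
        } catch (KeeperException e) {
          throw new IOException(e);
        }
      }
    }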
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
index 1aa959c..bd65cc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
@@ -317,7 +317,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
* @throws IOException if ZooKeeper cannot be reached
*/
@Override
- public void initialize(RegionServerServices rss) throws KeeperException {
+ public void initialize(RegionServerServices rss) throws IOException {
this.rss = rss;
ZooKeeperWatcher zkw = rss.getZooKeeper();
this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 211fed5..1cd54fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -807,8 +807,8 @@ public class HRegionServer extends HasThread implements
rspmHost = new RegionServerProcedureManagerHost();
rspmHost.loadProcedures(conf);
rspmHost.initialize(this);
- } catch (KeeperException e) {
- this.abort("Failed to reach zk cluster when creating procedure handler.", e);
+ } catch (IOException e) {
+ this.abort("Failed to reach coordination cluster when creating procedure handler.", e);
}
// register watcher for recovering regions
this.recoveringRegionWatcher = new RecoveringRegionWatcher(this.zooKeeper, this);
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index 537329a..e56dd28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -390,7 +390,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
* @throws IOException if the ZooKeeper cluster cannot be reached
*/
@Override
- public void initialize(RegionServerServices rss) throws KeeperException {
+ public void initialize(RegionServerServices rss) throws IOException {
this.rss = rss;
ZooKeeperWatcher zkw = rss.getZooKeeper();
this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 9ae72e6..acde21e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -97,6 +97,8 @@ import com.lmax.disruptor.TimeoutException;
import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.dsl.ProducerType;
+
+
/**
* Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS.
* Only one WAL is ever being written at a time. When a WAL hits a configured maximum size,
@@ -359,7 +361,9 @@ public class FSHLog implements WAL {
public int compare(Path o1, Path o2) {
long t1 = getFileNumFromFileName(o1);
long t2 = getFileNumFromFileName(o2);
- if (t1 == t2) return 0;
+ if (t1 == t2) {
+ return 0;
+ }
return (t1 > t2) ? 1 : -1;
}
};
@@ -402,7 +406,7 @@ public class FSHLog implements WAL {
* @param root path for stored and archived wals
* @param logDir dir where wals are stored
* @param conf configuration to use
- * @throws IOException
+ * @throws IOException exception
*/
public FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf)
throws IOException {
@@ -410,7 +414,7 @@ public class FSHLog implements WAL {
}
/**
- * Create an edit log at the given <code>dir</code> location.
+ * Create an edit log at the given directory location.
*
* You should never have to load an existing log. If there is a log at
* startup, it should have already been processed and deleted by the time the
@@ -425,13 +429,13 @@ public class FSHLog implements WAL {
* be registered before we do anything else; e.g. the
* Constructor {@link #rollWriter()}.
* @param failIfWALExists If true IOException will be thrown if files related to this wal
- * already exist.
+ * already exist.
* @param prefix should always be hostname and port in distributed env and
- * it will be URL encoded before being used.
- * If prefix is null, "wal" will be used
+ * it will be URL encoded before being used.
+ * If prefix is null, "wal" will be used
* @param suffix will be url encoded. null is treated as empty. non-empty must start with
- * {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER}
- * @throws IOException
+ * {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER}
+ * @throws IOException exception
*/
public FSHLog(final FileSystem fs, final Path rootDir, final String logDir,
final String archiveDir, final Configuration conf,
@@ -593,7 +597,9 @@ public class FSHLog implements WAL {
@VisibleForTesting
OutputStream getOutputStream() {
FSDataOutputStream fsdos = this.hdfs_out;
- if (fsdos == null) return null;
+ if (fsdos == null) {
+ return null;
+ }
return fsdos.getWrappedStream();
}
@@ -628,7 +634,7 @@ public class FSHLog implements WAL {
/**
* Tell listeners about pre log roll.
- * @throws IOException
+ * @throws IOException exception
*/
private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath)
throws IOException {
@@ -641,7 +647,7 @@ public class FSHLog implements WAL {
/**
* Tell listeners about post log roll.
- * @throws IOException
+ * @throws IOException exception
*/
private void tellListenersAboutPostLogRoll(final Path oldPath, final Path newPath)
throws IOException {
@@ -654,8 +660,7 @@ public class FSHLog implements WAL {
/**
* Run a sync after opening to set up the pipeline.
- * @param nextWriter
- * @param startTimeNanos
+ * @param nextWriter next writer
*/
private void preemptiveSync(final ProtobufLogWriter nextWriter) {
long startTimeNanos = System.nanoTime();
@@ -673,7 +678,9 @@ public class FSHLog implements WAL {
rollWriterLock.lock();
try {
// Return if nothing to flush.
- if (!force && (this.writer != null && this.numEntries.get() <= 0)) return null;
+ if (!force && (this.writer != null && this.numEntries.get() <= 0)) {
+ return null;
+ }
byte [][] regionsToFlush = null;
if (this.closed) {
LOG.debug("WAL closed. Skipping rolling of writer");
@@ -728,7 +735,7 @@ public class FSHLog implements WAL {
/**
* Archive old logs. A WAL is eligible for archiving if all its WALEdits have been flushed.
- * @throws IOException
+ * @throws IOException exception
*/
private void cleanOldLogs() throws IOException {
List<Path> logsToArchive = null;
@@ -738,9 +745,13 @@ public class FSHLog implements WAL {
Path log = e.getKey();
Map<byte[], Long> sequenceNums = e.getValue();
if (this.sequenceIdAccounting.areAllLower(sequenceNums)) {
- if (logsToArchive == null) logsToArchive = new ArrayList<Path>();
+ if (logsToArchive == null) {
+ logsToArchive = new ArrayList<Path>();
+ }
logsToArchive.add(log);
- if (LOG.isTraceEnabled()) LOG.trace("WAL file ready for archiving " + log);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("WAL file ready for archiving " + log);
+ }
}
}
if (logsToArchive != null) {
@@ -770,7 +781,9 @@ public class FSHLog implements WAL {
if (regions != null) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < regions.length; i++) {
- if (i > 0) sb.append(", ");
+ if (i > 0) {
+ sb.append(", ");
+ }
sb.append(Bytes.toStringBinary(regions[i]));
}
LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs +
@@ -836,7 +849,9 @@ public class FSHLog implements WAL {
}
} catch (FailedSyncBeforeLogCloseException e) {
// If unflushed/unsynced entries on close, it is reason to abort.
- if (isUnflushedEntries()) throw e;
+ if (isUnflushedEntries()) {
+ throw e;
+ }
LOG.warn("Failed sync-before-close but no outstanding appends; closing WAL: " +
e.getMessage());
}
@@ -897,7 +912,9 @@ public class FSHLog implements WAL {
try {
blockOnSync(syncFuture);
} catch (IOException ioe) {
- if (LOG.isTraceEnabled()) LOG.trace("Stale sync exception", ioe);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Stale sync exception", ioe);
+ }
}
}
}
@@ -968,7 +985,15 @@ public class FSHLog implements WAL {
public Path getCurrentFileName() {
return computeFilename(this.filenum.get());
}
-
+
+ /**
+ * Kept to support old API compatibility.
+ * @return current file number (timestamp)
+ */
+ public long getFilenum() {
+ return filenum.get();
+ }
+
@Override
public String toString() {
return "FSHLog " + logFilePrefix + ":" + logFileSuffix + "(num " + filenum + ")";
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java
new file mode 100644
index 0000000..0360000
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.hbase.backup.BackupHandler;
+
+/* This class will be extended in a future JIRA to support progress reporting. */
+public class SnapshotCopy extends ExportSnapshot {
+ private BackupHandler backupHandler;
+ private String table;
+
+ public SnapshotCopy(BackupHandler backupHandler, String table) {
+ super();
+ this.backupHandler = backupHandler;
+ this.table = table;
+ }
+
+ public BackupHandler getBackupHandler() {
+ return this.backupHandler;
+ }
+
+ public String getTable() {
+ return this.table;
+ }
+
+}
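
SnapshotCopy only decorates ExportSnapshot with backup bookkeeping; since ExportSnapshot is a Hadoop Tool, the wrapper can be driven the same way. A hedged usage sketch (the null handler, table name, snapshot name and destination path are placeholders; -snapshot and -copy-to are ExportSnapshot's usual options):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.SnapshotCopy;
    import org.apache.hadoop.util.ToolRunner;

    public class SnapshotCopyLauncher {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The backup code passes its live BackupHandler here; null is only
        // for illustration.
        SnapshotCopy copy = new SnapshotCopy(null, "mytable");
        int rc = ToolRunner.run(conf, copy,
            new String[] { "-snapshot", "snap1", "-copy-to", "hdfs://dest/backup" });
        System.exit(rc);
      }
    }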
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
index 027e7a2..dd4d337 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
@@ -209,13 +209,18 @@ public class DefaultWALProvider implements WALProvider {
@VisibleForTesting
public static long extractFileNumFromWAL(final WAL wal) {
final Path walName = ((FSHLog)wal).getCurrentFileName();
+ return extractFileNumFromWAL(walName);
+ }
+
+ @VisibleForTesting
+ public static long extractFileNumFromWAL(final Path walName) {
if (walName == null) {
throw new IllegalArgumentException("The WAL path couldn't be null");
}
final String[] walPathStrs = walName.toString().split("\\" + WAL_FILE_NAME_DELIMITER);
return Long.parseLong(walPathStrs[walPathStrs.length - (isMetaFile(walName) ? 2:1)]);
}
-
+
/**
* Pattern used to validate a WAL file name
* see {@link #validateWALFilename(String)} for description.
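
The new Path-based overload makes the file-number parse reusable for WAL paths that did not come from a live FSHLog, e.g. names read back from the hbase:backup table. A small sketch of what it computes (the path is a made-up example):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.wal.DefaultWALProvider;

    public class ExtractFileNumSketch {
      public static void main(String[] args) {
        // WAL names end in "<delimiter><filenum>"; meta WALs carry an extra
        // ".meta" suffix, which the method accounts for.
        Path wal = new Path("hdfs://nn/hbase/WALs/srv1,16020,1450000000000/"
            + "srv1%2C16020%2C1450000000000.1450000123456");
        System.out.println(DefaultWALProvider.extractFileNumFromWAL(wal));
        // prints 1450000123456
      }
    }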
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
new file mode 100644
index 0000000..bc0c848
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -0,0 +1,194 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupHandler.BACKUPSTATUS;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * This class is only a base for other integration-level backup tests.
+ * Do not add tests here.
+ * TestBackupSmallTests is where tests that don't require bringing machines up/down should go.
+ * All other tests should have their own classes and extend this one.
+ */
+public class TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
+
+ protected static Configuration conf1;
+ protected static Configuration conf2;
+
+ protected static HBaseTestingUtility TEST_UTIL;
+ protected static HBaseTestingUtility TEST_UTIL2;
+
+ protected static TableName table1;
+ protected static TableName table2;
+ protected static TableName table3;
+ protected static TableName table4;
+
+ protected static String table1_restore = "table1_restore";
+ protected static String table2_restore = "table2_restore";
+ protected static String table3_restore = "table3_restore";
+ protected static String table4_restore = "table4_restore";
+
+ protected static final int NB_ROWS_IN_BATCH = 100;
+ protected static final byte[] qualName = Bytes.toBytes("q1");
+ protected static final byte[] famName = Bytes.toBytes("f");
+
+ protected static String BACKUP_ROOT_DIR = "/backupUT";
+ protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
+
+ protected static final String BACKUP_ZNODE = "/backup/hbase";
+ protected static final String BACKUP_SUCCEED_NODE = "complete";
+ protected static final String BACKUP_FAILED_NODE = "failed";
+
+
+ /**
+ * Starts two mini HBase clusters (local and remote) plus a MapReduce cluster.
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+
+ TEST_UTIL = new HBaseTestingUtility();
+ TEST_UTIL.getConfiguration().set("hbase.procedure.regionserver.classes",
+ LogRollRegionServerProcedureManager.class.getName());
+ TEST_UTIL.getConfiguration().set("hbase.procedure.master.classes",
+ LogRollMasterProcedureManager.class.getName());
+ TEST_UTIL.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+ TEST_UTIL.startMiniZKCluster();
+ MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
+
+ conf1 = TEST_UTIL.getConfiguration();
+ conf2 = HBaseConfiguration.create(conf1);
+ conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+ TEST_UTIL2 = new HBaseTestingUtility(conf2);
+ TEST_UTIL2.setZkCluster(miniZK);
+ TEST_UTIL.startMiniCluster();
+ TEST_UTIL2.startMiniCluster();
+ conf1 = TEST_UTIL.getConfiguration();
+
+ TEST_UTIL.startMiniMapReduceCluster();
+ BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT";
+ LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
+ BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
+ LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
+
+ BackupClient.setConf(conf1);
+ RestoreClient.setConf(conf1);
+ createTables();
+ }
+
+ /**
+ * Shuts down the clusters started in setUpBeforeClass().
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
+ //zkw1.close();
+ TEST_UTIL2.shutdownMiniCluster();
+ TEST_UTIL.shutdownMiniCluster();
+ TEST_UTIL.shutdownMiniMapReduceCluster();
+ }
+
+ protected static void loadTable(HTable table) throws Exception {
+
+ Put p; // load NB_ROWS_IN_BATCH rows into the table
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p = new Put(Bytes.toBytes("row" + i));
+ p.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ table.put(p);
+ }
+ }
+
+ protected static void createTables() throws Exception {
+
+ long tid = System.currentTimeMillis();
+ table1 = TableName.valueOf("test-" + tid);
+ HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
+ HTableDescriptor desc = new HTableDescriptor(table1);
+ HColumnDescriptor fam = new HColumnDescriptor(famName);
+ desc.addFamily(fam);
+ ha.createTable(desc);
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ HTable table = (HTable) conn.getTable(table1);
+ loadTable(table);
+ table.close();
+ table2 = TableName.valueOf("test-" + tid + 1);
+ desc = new HTableDescriptor(table2);
+ desc.addFamily(fam);
+ ha.createTable(desc);
+ table = (HTable) conn.getTable(table2);
+ loadTable(table);
+ table.close();
+ table3 = TableName.valueOf("test-" + tid + 2);
+ table = TEST_UTIL.createTable(table3, famName);
+ table.close();
+ table4 = TableName.valueOf("test-" + tid + 3);
+ table = TEST_UTIL.createTable(table4, famName);
+ table.close();
+ ha.close();
+ conn.close();
+ }
+
+ protected boolean checkSucceeded(String backupId) throws IOException {
+ BackupContext status = getBackupContext(backupId);
+ if (status == null) {
+ return false;
+ }
+ return status.getFlag() == BACKUPSTATUS.COMPLETE;
+ }
+
+ protected boolean checkFailed(String backupId) throws IOException {
+ BackupContext status = getBackupContext(backupId);
+ if (status == null) {
+ return false;
+ }
+ return status.getFlag() == BACKUPSTATUS.FAILED;
+ }
+
+ private BackupContext getBackupContext(String backupId) throws IOException {
+ Configuration conf = BackupClient.getConf();
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ BackupContext status = table.readBackupStatus(backupId);
+ return status;
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
new file mode 100644
index 0000000..8be07bc
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestBackupBoundaryTests extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupBoundaryTests.class);
+
+ /**
+ * Verify that full backup is created on a single empty table correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupSingleEmpty() throws Exception {
+
+ LOG.info("create full backup image on single table");
+
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table3.getNameAsString(), null);
+ LOG.info("Finished Backup");
+ assertTrue(checkSucceeded(backupId));
+ }
+
+ /**
+ * Verify that full backup is created on multiple empty tables correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupMultipleEmpty() throws Exception {
+ LOG.info("create full backup image on mulitple empty tables");
+ String tableset =
+ table3.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table4.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ }
+
+ /**
+ * Verify that full backup fails on a single table that does not exist.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupSingleDNE() throws Exception {
+
+ LOG.info("test full backup fails on a single table that does not exist");
+ BackupClient.create("full", BACKUP_ROOT_DIR, "tabledne", null);
+ }
+
+ /**
+ * Verify that full backup fails on multiple tables that do not exist.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupMultipleDNE() throws Exception {
+
+ LOG.info("test full backup fails on multiple tables that do not exist");
+ BackupClient.create("full", BACKUP_ROOT_DIR, "table1dne,table2dne", null);
+ }
+
+ /**
+ * Verify that full backup fails on tableset containing real and fake tables.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupMixExistAndDNE() throws Exception {
+ LOG.info("create full backup fails on tableset containing real and fake table");
+ String tableset =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + "tabledne";
+ BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ }
+
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
new file mode 100644
index 0000000..158479b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Iterables;
+
+@Category(LargeTests.class)
+public class TestBackupLogCleaner extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
+
+ // Implements all cases in one test, since full backup and incremental
+ // backup have ordering dependencies.
+ @Test
+ public void testBackupLogCleaner() throws Exception {
+
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+ String tablesetFull =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table4.getNameAsString();
+
+ BackupSystemTable systemTable = BackupSystemTable.getTable(TEST_UTIL.getConfiguration());
+ // Verify that we have no backup sessions yet
+ assertFalse(systemTable.hasBackupSessions());
+
+ List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+ List<String> swalFiles = convert(walFiles);
+ BackupLogCleaner cleaner = new BackupLogCleaner();
+ cleaner.setConf(TEST_UTIL.getConfiguration());
+
+ Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+ // We can delete all files because no backup sessions have been recorded yet
+ assertTrue(Iterables.size(deletable) == walFiles.size());
+
+ systemTable.addWALFiles(swalFiles, "backup");
+ String backupIdFull = BackupClient.create("full", BACKUP_ROOT_DIR, tablesetFull, null);
+ assertTrue(checkSucceeded(backupIdFull));
+ // Check one more time
+ deletable = cleaner.getDeletableFiles(walFiles);
+ // We can delete WAL files because they were saved into the hbase:backup table
+ int size = Iterables.size(deletable);
+ assertTrue(size == walFiles.size());
+
+ List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+ LOG.debug("WAL list after full backup");
+ convert(newWalFiles);
+
+ // The new list of WAL files is longer than the previous one,
+ // because a new WAL per RS has been opened after the full backup
+ assertTrue(walFiles.size() < newWalFiles.size());
+ // TODO : verify that result files are not walFiles collection
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ // #2 - insert some data to table
+ HTable t1 = (HTable) conn.getTable(table1);
+ Put p1;
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p1 = new Put(Bytes.toBytes("row-t1" + i));
+ p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t1.put(p1);
+ }
+
+ t1.close();
+
+ HTable t2 = (HTable) conn.getTable(table2);
+ Put p2;
+ for (int i = 0; i < 5; i++) {
+ p2 = new Put(Bytes.toBytes("row-t2" + i));
+ p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t2.put(p2);
+ }
+
+ t2.close();
+
+ // #3 - incremental backup for multiple tables
+ String tablesetIncMultiple =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+
+ String backupIdIncMultiple =
+ BackupClient.create("incremental", BACKUP_ROOT_DIR, tablesetIncMultiple, null);
+ assertTrue(checkSucceeded(backupIdIncMultiple));
+ deletable = cleaner.getDeletableFiles(newWalFiles);
+
+ assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+ conn.close();
+
+ }
+
+ private List<String> convert(List<FileStatus> walFiles) {
+ List<String> result = new ArrayList<String>();
+ for (FileStatus fs : walFiles) {
+ LOG.debug("+++WAL: " + fs.getPath().toString());
+ result.add(fs.getPath().toString());
+ }
+ return result;
+ }
+
+ private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
+ Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
+ FileSystem fs = FileSystem.get(c);
+ RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
+ List<FileStatus> logFiles = new ArrayList<FileStatus>();
+ while (it.hasNext()) {
+ LocatedFileStatus lfs = it.next();
+ if (lfs.isFile() && !DefaultWALProvider.isMetaFile(lfs.getPath())) {
+ logFiles.add(lfs);
+ LOG.info(lfs);
+ }
+ }
+ return logFiles;
+ }
+
+}
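
BackupLogCleaner, exercised above, plugs into the master's log-cleaner chain and answers getDeletableFiles by consulting the hbase:backup table. For contrast, a toy delegate under the same contract (assuming the 1.x-era BaseLogCleanerDelegate API) that declares every candidate WAL deletable:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;

    public class AllowAllLogCleaner extends BaseLogCleanerDelegate {
      // BackupLogCleaner instead keeps any WAL still needed by a backup.
      @Override
      public boolean isLogDeletable(FileStatus fStat) {
        return true;
      }
    }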
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
new file mode 100644
index 0000000..7421707
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -0,0 +1,341 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.backup.BackupHandler.BACKUPSTATUS;
+import org.apache.hadoop.hbase.backup.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test cases for the hbase:backup API.
+ */
+@Category(MediumTests.class)
+public class TestBackupSystemTable {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ protected static Configuration conf = UTIL.getConfiguration();
+ protected static MiniHBaseCluster cluster;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ cluster = UTIL.startMiniCluster();
+
+ }
+
+ @Test
+ public void testUpdateReadDeleteBackupStatus() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ BackupContext ctx = createBackupContext();
+ table.updateBackupStatus(ctx);
+ BackupContext readCtx = table.readBackupStatus(ctx.getBackupId());
+ assertTrue(compare(ctx, readCtx));
+
+ // try fake backup id
+ readCtx = table.readBackupStatus("fake");
+
+ assertNull(readCtx);
+ // delete backup context
+ table.deleteBackupStatus(ctx.getBackupId());
+ readCtx = table.readBackupStatus(ctx.getBackupId());
+ assertNull(readCtx);
+ cleanBackupTable();
+ }
+
+ @Test
+ public void testWriteReadBackupStartCode() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ String code = "100";
+ table.writeBackupStartCode(code);
+ String readCode = table.readBackupStartCode();
+ assertEquals(code, readCode);
+ cleanBackupTable();
+ }
+
+ private void cleanBackupTable() throws IOException {
+ Admin admin = UTIL.getHBaseAdmin();
+ admin.disableTable(BackupSystemTable.getTableName());
+ admin.truncateTable(BackupSystemTable.getTableName(), true);
+ if (admin.isTableDisabled(BackupSystemTable.getTableName())) {
+ admin.enableTable(BackupSystemTable.getTableName());
+ }
+ }
+
+ @Test
+ public void testBackupHistory() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ int n = 10;
+ List<BackupContext> list = createBackupContextList(n);
+
+ // Load data
+ for (BackupContext bc : list) {
+ // Make sure we set right status
+ bc.setFlag(BACKUPSTATUS.COMPLETE);
+ table.updateBackupStatus(bc);
+ }
+
+ // Reverse list for comparison
+ Collections.reverse(list);
+ ArrayList<BackupCompleteData> history = table.getBackupHistory();
+ assertTrue(history.size() == n);
+
+ for (int i = 0; i < n; i++) {
+ BackupContext ctx = list.get(i);
+ BackupCompleteData data = history.get(i);
+ assertTrue(compare(ctx, data));
+ }
+
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testRegionServerLastLogRollResults() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] servers = new String[] { "server1", "server2", "server3" };
+ String[] timestamps = new String[] { "100", "102", "107" };
+
+ for (int i = 0; i < servers.length; i++) {
+ table.writeRegionServerLastLogRollResult(servers[i], timestamps[i]);
+ }
+
+ HashMap<String, String> result = table.readRegionServerLastLogRollResult();
+ assertTrue(servers.length == result.size());
+ Set<String> keys = result.keySet();
+ String[] keysAsArray = new String[keys.size()];
+ keys.toArray(keysAsArray);
+ Arrays.sort(keysAsArray);
+
+ for (int i = 0; i < keysAsArray.length; i++) {
+ assertEquals(keysAsArray[i], servers[i]);
+ String ts1 = timestamps[i];
+ String ts2 = result.get(keysAsArray[i]);
+ assertEquals(ts1, ts2);
+ }
+
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testIncrementalBackupTableSet() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ TreeSet<String> tables1 = new TreeSet<String>();
+
+ tables1.add("t1");
+ tables1.add("t2");
+ tables1.add("t3");
+
+ TreeSet<String> tables2 = new TreeSet<String>();
+
+ tables2.add("t3");
+ tables2.add("t4");
+ tables2.add("t5");
+
+ table.addIncrementalBackupTableSet(tables1);
+ TreeSet<String> res1 = (TreeSet<String>) table.getIncrementalBackupTableSet();
+ assertTrue(tables1.size() == res1.size());
+ Iterator<String> desc1 = tables1.descendingIterator();
+ Iterator<String> desc2 = res1.descendingIterator();
+ while (desc1.hasNext()) {
+ assertEquals(desc1.next(), desc2.next());
+ }
+
+ table.addIncrementalBackupTableSet(tables2);
+ TreeSet<String> res2 = (TreeSet<String>) table.getIncrementalBackupTableSet();
+ assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
+
+ tables1.addAll(tables2);
+
+ desc1 = tables1.descendingIterator();
+ desc2 = res2.descendingIterator();
+
+ while (desc1.hasNext()) {
+ assertEquals(desc1.next(), desc2.next());
+ }
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testRegionServerLogTimestampMap() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ TreeSet<String> tables = new TreeSet<String>();
+
+ tables.add("t1");
+ tables.add("t2");
+ tables.add("t3");
+
+ HashMap<String, String> rsTimestampMap = new HashMap<String, String>();
+
+ rsTimestampMap.put("rs1", "100");
+ rsTimestampMap.put("rs2", "101");
+ rsTimestampMap.put("rs3", "103");
+
+ table.writeRegionServerLogTimestamp(tables, rsTimestampMap);
+
+ HashMap<String, HashMap<String, String>> result = table.readLogTimestampMap();
+
+ assertTrue(tables.size() == result.size());
+
+ for (String t : tables) {
+ HashMap<String, String> rstm = result.get(t);
+ assertNotNull(rstm);
+ assertEquals(rstm.get("rs1"), "100");
+ assertEquals(rstm.get("rs2"), "101");
+ assertEquals(rstm.get("rs3"), "103");
+ }
+
+ Set<String> tables1 = new TreeSet<String>();
+
+ tables1.add("t3");
+ tables1.add("t4");
+ tables1.add("t5");
+
+ HashMap<String, String> rsTimestampMap1 = new HashMap<String, String>();
+
+ rsTimestampMap1.put("rs1", "200");
+ rsTimestampMap1.put("rs2", "201");
+ rsTimestampMap1.put("rs3", "203");
+
+ table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1);
+
+ result = table.readLogTimestampMap();
+
+ assertTrue(5 == result.size());
+
+ for (String t : tables) {
+ HashMap<String, String> rstm = result.get(t);
+ assertNotNull(rstm);
+ if (t.equals("t3") == false) {
+ assertEquals(rstm.get("rs1"), "100");
+ assertEquals(rstm.get("rs2"), "101");
+ assertEquals(rstm.get("rs3"), "103");
+ } else {
+ assertEquals(rstm.get("rs1"), "200");
+ assertEquals(rstm.get("rs2"), "201");
+ assertEquals(rstm.get("rs3"), "203");
+ }
+ }
+
+ for (String t : tables1) {
+ HashMap<String, String> rstm = result.get(t);
+ assertNotNull(rstm);
+ assertEquals(rstm.get("rs1"), "200");
+ assertEquals(rstm.get("rs2"), "201");
+ assertEquals(rstm.get("rs3"), "203");
+ }
+
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testAddWALFiles() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ FileSystem fs = FileSystem.get(conf);
+ List<String> files =
+ Arrays.asList("hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.1",
+ "hdfs://server/WALs/srv2,102,16666/srv2,102,16666.default.2",
+ "hdfs://server/WALs/srv3,103,17777/srv3,103,17777.default.3");
+ String newFile = "hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.5";
+
+ table.addWALFiles(files, "backup");
+
+ assertTrue(table.checkWALFile(files.get(0)));
+ assertTrue(table.checkWALFile(files.get(1)));
+ assertTrue(table.checkWALFile(files.get(2)));
+ assertFalse(table.checkWALFile(newFile));
+
+ cleanBackupTable();
+ }
+
+ private boolean compare(BackupContext ctx, BackupCompleteData data) {
+
+ return ctx.getBackupId().equals(data.getBackupToken())
+ && ctx.getTargetRootDir().equals(data.getBackupRootPath())
+ && ctx.getType().equals(data.getType())
+ && ctx.getStartTs() == Long.parseLong(data.getStartTime())
+ && ctx.getEndTs() == Long.parseLong(data.getEndTime());
+
+ }
+
+ private boolean compare(BackupContext one, BackupContext two) {
+ return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
+ && one.getTargetRootDir().equals(two.getTargetRootDir())
+ && one.getStartTs() == two.getStartTs() && one.getEndTs() == two.getEndTs();
+ }
+
+ private BackupContext createBackupContext() {
+ BackupContext ctxt =
+ new BackupContext("backup_" + System.nanoTime(), "full", new String[] { "t1", "t2", "t3" },
+ "/hbase/backup", null);
+ ctxt.setStartTs(System.currentTimeMillis());
+ ctxt.setEndTs(System.currentTimeMillis() + 1);
+ return ctxt;
+ }
+
+ private List<BackupContext> createBackupContextList(int size) {
+ List<BackupContext> list = new ArrayList<BackupContext>();
+ for (int i = 0; i < size; i++) {
+ list.add(createBackupContext());
+ try {
+ Thread.sleep(10);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ return list;
+ }
+
+ @AfterClass
+ public static void tearDown() throws IOException {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+}
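
A note on what testRegionServerLogTimestampMap pins down: writeRegionServerLogTimestamp upserts one row per named table, so a later write for the same table replaces that table's whole per-regionserver map, while rows for tables not named in the call survive. A minimal sketch of that contract, assuming a Configuration wired to a cluster where the backup system table exists (the package of BackupSystemTable is assumed from the test's own location):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.TreeSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.backup.BackupSystemTable;

    public class LogTimestampSketch {
      static void demo(Configuration conf) throws IOException {
        BackupSystemTable table = BackupSystemTable.getTable(conf);
        TreeSet<String> tables = new TreeSet<String>();
        tables.add("t1");
        HashMap<String, String> rsTs = new HashMap<String, String>();
        rsTs.put("rs1", "100");
        table.writeRegionServerLogTimestamp(tables, rsTs);
        // A second write for the same table replaces its whole per-RS map;
        // rows for tables not named in the call are left as they were.
        rsTs.put("rs1", "200");
        table.writeRegionServerLogTimestamp(tables, rsTs);
        HashMap<String, HashMap<String, String>> result = table.readLogTimestampMap();
        // result.get("t1").get("rs1") is now "200", not "100".
      }
    }
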
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
new file mode 100644
index 0000000..19caf37
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullBackup extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestFullBackup.class);
+
+ /**
+ * Verify that a full backup of a single table with data is created correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupSingle() throws Exception {
+
+ LOG.info("test full backup on a single table with data");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+ }
+
+ /**
+ * Verify that a full backup of multiple tables is created correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupMultiple() throws Exception {
+ LOG.info("create full backup image on multiple tables with data");
+ String tableset =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ }
+
+ /**
+ * Verify that a full backup of all tables is created correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupAll() throws Exception {
+ LOG.info("create full backup image on all tables");
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, null, null);
+ assertTrue(checkSucceeded(backupId));
+
+ }
+
+ /**
+ * Verify that a full backup of a table is created correctly using a snapshot.
+ * Currently disabled.
+ * @throws Exception
+ */
+ //@Test
+ //public void testFullBackupUsingSnapshot() throws Exception {
+ // HBaseAdmin hba = new HBaseAdmin(conf1);
+ // String snapshot = "snapshot";
+ // hba.snapshot(snapshot, table1);
+ // LOG.info("create full backup image on a table using snapshot");
+ // String backupId =
+ // BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), snapshot);
+ //}
+
+}
\ No newline at end of file
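
Taken together, the tests above cover the invocation shapes of BackupClient.create for full backups. A hedged sketch, with a hypothetical root path (the tests use BACKUP_ROOT_DIR; the last argument is a snapshot name in the disabled snapshot variant, null everywhere else):

    // Hypothetical backup destination; any path the cluster can write to.
    String root = "hdfs://namenode:8020/hbase/backup";

    // Single table.
    String id1 = BackupClient.create("full", root, "t1", null);

    // Multiple tables, joined with the command-line delimiter constant.
    String id2 = BackupClient.create("full", root,
        "t1" + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND + "t2", null);

    // All tables: pass null for the table set.
    String id3 = BackupClient.create("full", root, null, null);
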
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
new file mode 100644
index 0000000..1262c54
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullRestore extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestFullRestore.class);
+
+ /**
+ * Verify that a single table is restored to a new table.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingle() throws Exception {
+
+ LOG.info("test full restore on a single table empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(TableName.valueOf(table1_restore)));
+ TEST_UTIL.deleteTable(TableName.valueOf(table1_restore));
+ hba.close();
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultiple() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false,
+ restore_tableset, tablemap, false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(TableName.valueOf(table2_restore)));
+ assertTrue(hba.tableExists(TableName.valueOf(table3_restore)));
+ TEST_UTIL.deleteTable(TableName.valueOf(table2_restore));
+ TEST_UTIL.deleteTable(TableName.valueOf(table3_restore));
+ hba.close();
+ }
+
+ /**
+ * Verify that a single table is restored in place using overwrite.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingleOverwrite() throws Exception {
+
+ LOG.info("test full restore on a single table empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ String[] tableset = new String[] { table1.getNameAsString() };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, null,
+ true);
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables using overwrite.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultipleOverwrite() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false,
+ false, restore_tableset, null, true);
+ }
+
+ /**
+ * Verify that restore fails on a single table that does not exist.
+ * @throws Exception
+ */
+ @Test(expected = IOException.class)
+ public void testFullRestoreSingleDNE() throws Exception {
+
+ LOG.info("test restore fails on a single table that does not exist");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ String[] tableset = new String[] { "faketable" };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ }
+
+ /**
+ * Verify that restore fails on multiple tables that do not exist.
+ * @throws Exception
+ */
+ @Test(expected = IOException.class)
+ public void testFullRestoreMultipleDNE() throws Exception {
+
+ LOG.info("test restore fails on multiple tables that do not exist");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ String[] restore_tableset = new String[] { "faketable1", "faketable2" };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false,
+ false, restore_tableset, tablemap, false);
+ }
+}
\ No newline at end of file
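
For reference, the argument roles of RestoreClient.restore_stage1 as they can be read off the calls above; the two boolean flags are false in every call in this file, so their meaning is not established here and they are simply carried through:

    Path root = new Path(BACKUP_ROOT_DIR);
    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, root, backupId);
    RestoreClient.restore_stage1(
        hbfs,                            // handle onto the backup image layout
        BACKUP_ROOT_DIR, backupId,       // which image to restore
        false, false,                    // flags, always false in these tests
        new String[] { "t1" },           // tables recorded in the image
        new String[] { "t1_restore" },   // target names; null keeps the originals
        false);                          // overwrite existing tables (true in the
                                         // *Overwrite tests, which pass a null map)
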
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
new file mode 100644
index 0000000..5437b84
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.fs.Path;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackup extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class);
+
+ // Implement all test cases in one test, since incremental backup/restore has dependencies.
+ @Test
+ public void testIncBackupRestore() throws Exception {
+ HBackupFileSystem hbfs;
+
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+ String tablesetFull =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table4.getNameAsString();
+
+ String backupIdFull =
+ BackupClient.create("full", BACKUP_ROOT_DIR, tablesetFull, null);
+ assertTrue(checkSucceeded(backupIdFull));
+
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ // #2 - insert some data to table
+ HTable t1 = (HTable) conn.getTable(table1);
+ Put p1;
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p1 = new Put(Bytes.toBytes("row-t1" + i));
+ p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t1.put(p1);
+ }
+
+ Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+ t1.close();
+
+ HTable t2 = (HTable) conn.getTable(table2);
+ Put p2;
+ for (int i = 0; i < 5; i++) {
+ p2 = new Put(Bytes.toBytes("row-t2" + i));
+ p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t2.put(p2);
+ }
+
+ Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+ t2.close();
+
+ // #3 - incremental backup for multiple tables
+ String tablesetIncMultiple =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+
+ String backupIdIncMultiple = BackupClient.create("incremental", BACKUP_ROOT_DIR,
+ tablesetIncMultiple, null);
+ assertTrue(checkSucceeded(backupIdIncMultiple));
+
+ // #4 - restore full backup for all tables, without overwrite
+ String[] tablesRestoreFull =
+ new String[] { table1.getNameAsString(), table2.getNameAsString(),
+ table3.getNameAsString(), table4.getNameAsString() };
+
+ String[] tablesMapFull =
+ new String[] { table1_restore, table2_restore, table3_restore, table4_restore };
+
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdFull);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdFull, false, false,
+ tablesRestoreFull,
+ tablesMapFull, false);
+
+ // #5.1 - check tables for full restore
+ HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hAdmin.tableExists(TableName.valueOf(table1_restore)));
+ assertTrue(hAdmin.tableExists(TableName.valueOf(table2_restore)));
+ assertTrue(hAdmin.tableExists(TableName.valueOf(table3_restore)));
+ assertTrue(hAdmin.tableExists(TableName.valueOf(table4_restore)));
+
+ hAdmin.close();
+
+ // #5.2 - checking row count of tables for full restore
+ HTable hTable = (HTable) conn.getTable(TableName.valueOf(table1_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table2_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table3_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table4_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ // #6 - restore incremental backup for multiple tables, with overwrite
+ String[] tablesRestoreIncMultiple =
+ new String[]
+ { table1.getNameAsString(), table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablesMapIncMultiple =
+ new String[] { table1_restore, table2_restore, table3_restore };
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
+ tablesRestoreIncMultiple, tablesMapIncMultiple, true);
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table1_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table2_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table3_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ // #7 - incremental backup for single, empty table
+
+ String tablesetIncEmpty = table4.getNameAsString();
+ String backupIdIncEmpty =
+ BackupClient.create("incremental", BACKUP_ROOT_DIR, tablesetIncEmpty, null);
+ assertTrue(checkSucceeded(backupIdIncEmpty));
+
+ // #8 - restore incremental backup for single empty table, with overwrite
+ String[] tablesRestoreIncEmpty = new String[] { table4.getNameAsString() };
+ String[] tablesMapIncEmpty = new String[] { table4_restore };
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncEmpty);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdIncEmpty, false, false,
+ tablesRestoreIncEmpty,
+ tablesMapIncEmpty, true);
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table4_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+ conn.close();
+ }
+
+}
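
The single test above leans on a strict ordering, which a condensed sketch makes explicit: full image first, then edits, then the incremental image; on restore, the full image is applied without overwrite, and the incremental image is replayed over the same targets with overwrite=true so the post-backup edits land:

    String fullId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
    // ... rows are written to the source tables here ...
    String incId = BackupClient.create("incremental", BACKUP_ROOT_DIR, tableset, null);

    // Restore the full image into new table names, without overwrite.
    HBackupFileSystem full = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), fullId);
    RestoreClient.restore_stage1(full, BACKUP_ROOT_DIR, fullId, false, false,
        tables, tableMap, false);

    // Replay the incremental image over the same targets, with overwrite.
    HBackupFileSystem inc = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), incId);
    RestoreClient.restore_stage1(inc, BACKUP_ROOT_DIR, incId, false, false,
        tables, tableMap, true);
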
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
new file mode 100644
index 0000000..03822a2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteBackup extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
+
+ /**
+ * Verify that a remote full backup of a single table with data is created correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupRemote() throws Exception {
+
+ LOG.info("test remote full backup on a single table");
+
+ // String rootdir = TEST_UTIL2.getDefaultRootDirPath() + BACKUP_ROOT_DIR;
+ // LOG.info("ROOTDIR " + rootdir);
+ String backupId =
+ BackupClient.create("full", BACKUP_REMOTE_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+ }
+
+}
\ No newline at end of file
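
The only delta from TestFullBackup is the destination: pointing the backup root at a filesystem other than the cluster under test makes the backup remote. A one-line sketch with a hypothetical remote URI (the test itself uses BACKUP_REMOTE_ROOT_DIR):

    String backupId = BackupClient.create("full",
        "hdfs://backup-cluster:8020/backup", table1.getNameAsString(), null);
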
http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
new file mode 100644
index 0000000..e1315c5
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteRestore extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
+
+ /**
+ * Verify that a remote restore on a single table is successful.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreRemote() throws Exception {
+
+ LOG.info("test remote full backup on a single table");
+ String backupId =
+ BackupClient.create("full", BACKUP_REMOTE_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_REMOTE_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset,
+ tablemap, false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(TableName.valueOf(table1_restore)));
+ TEST_UTIL.deleteTable(TableName.valueOf(table1_restore));
+ hba.close();
+ }
+
+}
\ No newline at end of file