Posted to commits@hbase.apache.org by te...@apache.org on 2016/10/05 23:30:15 UTC

[09/10] hbase git commit: HBASE-16727 Backup refactoring: remove MR dependencies from HMaster (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
deleted file mode 100644
index ac1d2bc..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * BackupRestoreConstants holds HBase backup and restore constants
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public final class BackupRestoreConstants {
-
-
-  // delimiter in tablename list in restore command
-  public static final String TABLENAME_DELIMITER_IN_COMMAND = ",";
-
-  public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root";
-
-  public static final String BACKUPID_PREFIX = "backup_";
-
-  public static enum BackupCommand {
-    CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, SET,
-    SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST
-  }
-
-  private BackupRestoreConstants() {
-    // Can't be instantiated with this ctor.
-  }
-}

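For context: the constants above were consumed by client-side command parsing. Below is a minimal sketch of that usage, assuming the pre-refactoring classes are still on the classpath; the wrapper class itself is hypothetical and not part of this patch.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;

public class TableListParserSketch {
  public static void main(String[] args) {
    // Restore commands accept a comma-separated table list.
    String[] tables = "ns1:t1,ns2:t2"
        .split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
    for (String t : tables) {
      System.out.println(TableName.valueOf(t));
    }
    // Backup ids are the well-known prefix plus a start timestamp,
    // e.g. "backup_1396650096738".
    String backupId = BackupRestoreConstants.BACKUPID_PREFIX + System.currentTimeMillis();
    System.out.println(backupId);
  }
}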
http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
deleted file mode 100644
index 3066282..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ /dev/null
@@ -1,873 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
-
-/**
- * This class provides the API for the 'hbase:backup' table
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupSystemTable implements Closeable {
-
-  static class WALItem {
-    String backupId;
-    String walFile;
-    String backupRoot;
-
-    WALItem(String backupId, String walFile, String backupRoot) {
-      this.backupId = backupId;
-      this.walFile = walFile;
-      this.backupRoot = backupRoot;
-    }
-
-    public String getBackupId() {
-      return backupId;
-    }
-
-    public String getWalFile() {
-      return walFile;
-    }
-
-    public String getBackupRoot() {
-      return backupRoot;
-    }
-
-    public String toString() {
-      return "/" + backupRoot + "/" + backupId + "/" + walFile;
-    }
-
-  }
-
-  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
-  private final static TableName tableName = TableName.BACKUP_TABLE_NAME;
-  // Stores backup sessions (contexts)
-  final static byte[] SESSIONS_FAMILY = "session".getBytes();
-  // Stores other meta
-  final static byte[] META_FAMILY = "meta".getBytes();
-  // Connection to HBase cluster, shared
-  // among all instances
-  private final Connection connection;
-
-  public BackupSystemTable(Connection conn) throws IOException {
-    this.connection = conn;
-  }
-
-  public void close() {
-    // do nothing
-  }
-
-  /**
-   * Updates status (state) of a backup session in hbase:backup table
-   * @param context context
-   * @throws IOException exception
-   */
-  public void updateBackupInfo(BackupInfo context) throws IOException {
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("update backup status in hbase:backup for: " + context.getBackupId()
-          + " set status=" + context.getState());
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put = BackupSystemTableHelper.createPutForBackupContext(context);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Deletes backup status from hbase:backup table
-   * @param backupId backup id
-   * @throws IOException exception
-   */
-
-  public void deleteBackupInfo(String backupId) throws IOException {
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("delete backup status in hbase:backup for " + backupId);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Delete del = BackupSystemTableHelper.createDeleteForBackupInfo(backupId);
-      table.delete(del);
-    }
-  }
-
-  /**
-   * Reads a backup status object (an instance of BackupInfo) from the hbase:backup table
-   * @param backupId - backupId
-   * @return Current status of backup session or null
-   */
-
-  public BackupInfo readBackupInfo(String backupId) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read backup status from hbase:backup for: " + backupId);
-    }
-
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForBackupContext(backupId);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return null;
-      }
-      return BackupSystemTableHelper.resultToBackupInfo(res);
-    }
-  }
-
-  /**
-   * Reads the start code (timestamp) of the last successful backup. Will return null if
-   * there is no start code stored on hbase or the value is of length 0. These two cases indicate
-   * there is no successful backup completed so far.
-   * @param backupRoot root directory path to backup
-   * @return the timestamp of last successful backup
-   * @throws IOException exception
-   */
-  public String readBackupStartCode(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read backup start code from hbase:backup");
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForStartCode(backupRoot);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return null;
-      }
-      Cell cell = res.listCells().get(0);
-      byte[] val = CellUtil.cloneValue(cell);
-      if (val.length == 0) {
-        return null;
-      }
-      return new String(val);
-    }
-  }
-
-  /**
-   * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte.
-   * @param startCode start code
-   * @param backupRoot root directory path to backup
-   * @throws IOException exception
-   */
-  public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write backup start code to hbase:backup " + startCode);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString(), backupRoot);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Get the Region Servers log information after the last log roll from hbase:backup.
-   * @param backupRoot root directory path to backup
-   * @return RS log info
-   * @throws IOException exception
-   */
-  public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read region server last roll log result to hbase:backup");
-    }
-
-    Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(backupRoot);
-
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>();
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        Cell cell = res.current();
-        byte[] row = CellUtil.cloneRow(cell);
-        String server =
-            BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row);
-        byte[] data = CellUtil.cloneValue(cell);
-        rsTimestampMap.put(server, Long.parseLong(new String(data)));
-      }
-      return rsTimestampMap;
-    }
-  }
-
-  /**
-   * Writes Region Server last roll log result (timestamp) to hbase:backup table
-   * @param server - Region Server name
-   * @param ts - last log timestamp
-   * @param backupRoot root directory path to backup
-   * @throws IOException exception
-   */
-  public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write region server last roll log result to hbase:backup");
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put =
-          BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server, ts, backupRoot);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Get all completed backup information (in desc order by time)
-   * @param onlyCompleted true, if only successfully completed sessions
-   * @return list of BackupInfo, in descending order by start time
-   * @throws IOException exception
-   */
-  public ArrayList<BackupInfo> getBackupHistory(boolean onlyCompleted) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get backup history from hbase:backup");
-    }
-    ArrayList<BackupInfo> list;
-    BackupState state = onlyCompleted ? BackupState.COMPLETE : BackupState.ANY;
-    list = getBackupContexts(state);
-    return BackupClientUtil.sortHistoryListDesc(list);
-  }
-
-  public List<BackupInfo> getBackupHistory() throws IOException {
-    return getBackupHistory(false);
-  }
-
-  /**
-   * Get history for backup destination
-   * @param backupRoot - backup destination
-   * @return List of backup info
-   * @throws IOException
-   */
-  public List<BackupInfo> getBackupHistory(String backupRoot) throws IOException {
-    ArrayList<BackupInfo> history = getBackupHistory(false);
-    for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
-      BackupInfo info = iterator.next();
-      if (!backupRoot.equals(info.getTargetRootDir())) {
-        iterator.remove();
-      }
-    }
-    return history;
-  }
-  
-  /**
-   * Get history for a table
-   * @param name - table name
-   * @return history for a table
-   * @throws IOException
-   */
-  public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOException {
-    List<BackupInfo> history = getBackupHistory();
-    List<BackupInfo> tableHistory = new ArrayList<BackupInfo>();
-    for (BackupInfo info : history) {
-      List<TableName> tables = info.getTableNames();
-      if (tables.contains(name)) {
-        tableHistory.add(info);
-      }
-    }
-    return tableHistory;
-  }
-
-  public Map<TableName, ArrayList<BackupInfo>> 
-    getBackupHistoryForTableSet(Set<TableName> set, String backupRoot) throws IOException {
-    List<BackupInfo> history = getBackupHistory(backupRoot);
-    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = 
-        new HashMap<TableName, ArrayList<BackupInfo>>();
-    for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
-      BackupInfo info = iterator.next();
-      if (!backupRoot.equals(info.getTargetRootDir())) {
-        continue;
-      }
-      List<TableName> tables = info.getTableNames();
-      for (TableName tableName: tables) {      
-        if (set.contains(tableName)) {
-          ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
-          if (list == null) {
-            list = new ArrayList<BackupInfo>();
-            tableHistoryMap.put(tableName, list);
-          }
-          list.add(info);
-        }
-      }
-    }
-    return tableHistoryMap;
-  }
-  
-  /**
-   * Get all backup sessions with a given status (in desc order by time)
-   * @param status status
-   * @return history info of backup contexts
-   * @throws IOException exception
-   */
-  public ArrayList<BackupInfo> getBackupContexts(BackupState status) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get backup contexts from hbase:backup");
-    }
-
-    Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
-
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current());
-        if (status != BackupState.ANY && context.getState() != status) {
-          continue;
-        }
-        list.add(context);
-      }
-      return list;
-    }
-  }
-
-  /**
-   * Write the current timestamps for each regionserver to hbase:backup after a successful full or
-   * incremental backup. The saved timestamp is of the last log file that was backed up already.
-   * @param tables tables
-   * @param newTimestamps timestamps
-   * @param backupRoot root directory path to backup
-   * @throws IOException exception
-   */
-  public void writeRegionServerLogTimestamp(Set<TableName> tables,
-      HashMap<String, Long> newTimestamps, String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write RS log time stamps to hbase:backup for tables ["
-          + StringUtils.join(tables, ",") + "]");
-    }
-    List<Put> puts = new ArrayList<Put>();
-    for (TableName table : tables) {
-      byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray();
-      Put put =
-          BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, smapData,
-            backupRoot);
-      puts.add(put);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      table.put(puts);
-    }
-  }
-
-  /**
-   * Read the timestamp for each region server log after the last successful backup. Each table has
-   * its own set of the timestamps. The info is stored for each table as a concatenated string of
-   * rs->timestapmp
-   * @param backupRoot root directory path to backup
-   * @return the timestamp for each region server. key: tableName value:
-   *         RegionServer,PreviousTimeStamp
-   * @throws IOException exception
-   */
-  public HashMap<TableName, HashMap<String, Long>> readLogTimestampMap(String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read RS log ts from hbase:backup for root=" + backupRoot);
-    }
-
-    HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-        new HashMap<TableName, HashMap<String, Long>>();
-
-    Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(backupRoot);
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        Cell cell = res.current();
-        byte[] row = CellUtil.cloneRow(cell);
-        String tabName = BackupSystemTableHelper.getTableNameForReadLogTimestampMap(row);
-        TableName tn = TableName.valueOf(tabName);
-        byte[] data = CellUtil.cloneValue(cell);
-        if (data == null) {
-          throw new IOException("Data of last backup data from hbase:backup "
-              + "is empty. Create a backup first.");
-        }
-        if (data.length > 0) {
-          HashMap<String, Long> lastBackup =
-              fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data));
-          tableTimestampMap.put(tn, lastBackup);
-        }
-      }
-      return tableTimestampMap;
-    }
-  }
-
-  private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table,
-      Map<String, Long> map) {
-    BackupProtos.TableServerTimestamp.Builder tstBuilder =
-        BackupProtos.TableServerTimestamp.newBuilder();
-    tstBuilder.setTable(ProtobufUtil.toProtoTableName(table));
-
-    for (Entry<String, Long> entry : map.entrySet()) {
-      BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();
-      builder.setServer(entry.getKey());
-      builder.setTimestamp(entry.getValue());
-      tstBuilder.addServerTimestamp(builder.build());
-    }
-
-    return tstBuilder.build();
-  }
-
-  private HashMap<String, Long> fromTableServerTimestampProto(
-      BackupProtos.TableServerTimestamp proto) {
-    HashMap<String, Long> map = new HashMap<String, Long>();
-    List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
-    for (BackupProtos.ServerTimestamp st : list) {
-      map.put(st.getServer(), st.getTimestamp());
-    }
-    return map;
-  }
-
-  /**
-   * Return the current tables covered by incremental backup.
-   * @param backupRoot root directory path to backup
-   * @return set of tableNames
-   * @throws IOException exception
-   */
-  public Set<TableName> getIncrementalBackupTableSet(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get incr backup table set from hbase:backup");
-    }
-    TreeSet<TableName> set = new TreeSet<>();
-
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(backupRoot);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return set;
-      }
-      List<Cell> cells = res.listCells();
-      for (Cell cell : cells) {
-        // qualifier = table name - we use table names as qualifiers
-        set.add(TableName.valueOf(CellUtil.cloneQualifier(cell)));
-      }
-      return set;
-    }
-  }
-
-  /**
-   * Add tables to global incremental backup set
-   * @param tables - set of tables
-   * @param backupRoot root directory path to backup
-   * @throws IOException exception
-   */
-  public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Add incremental backup table set to hbase:backup. ROOT=" + backupRoot
-          + " tables [" + StringUtils.join(tables, " ") + "]");
-      for (TableName table : tables) {
-        LOG.debug(table);
-      }
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables, backupRoot);
-      table.put(put);
-    }
-  }
-
-  /**
-   * Removes incremental backup set
-   * @param backupRoot backup root
-   */
-
-  public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Delete incremental backup table set to hbase:backup. ROOT=" + backupRoot);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Delete delete = BackupSystemTableHelper.createDeleteForIncrBackupTableSet(backupRoot);
-      table.delete(delete);
-    }
-  }
-
-  /**
-   * Register WAL files as eligible for deletion
-   * @param files files
-   * @param backupId backup id
-   * @param backupRoot root directory path to backup
-   * @throws IOException exception
-   */
-  public void addWALFiles(List<String> files, String backupId, String backupRoot)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("add WAL files to hbase:backup: " + backupId + " " + backupRoot + " files ["
-          + StringUtils.join(files, ",") + "]");
-      for (String f : files) {
-        LOG.debug("add :" + f);
-      }
-    }
-    try (Table table = connection.getTable(tableName)) {
-      List<Put> puts =
-          BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId, backupRoot);
-      table.put(puts);
-    }
-  }
-
-  /**
-   * Creates an iterator over the WAL files recorded in hbase:backup
-   * @param backupRoot root directory path to backup
-   * @return iterator over WALItem entries
-   * @throws IOException exception
-   */
-  public Iterator<WALItem> getWALFilesIterator(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get WAL files from hbase:backup");
-    }
-    final Table table = connection.getTable(tableName);
-    Scan scan = BackupSystemTableHelper.createScanForGetWALs(backupRoot);
-    final ResultScanner scanner = table.getScanner(scan);
-    final Iterator<Result> it = scanner.iterator();
-    return new Iterator<WALItem>() {
-
-      @Override
-      public boolean hasNext() {
-        boolean next = it.hasNext();
-        if (!next) {
-          // close all
-          try {
-            scanner.close();
-            table.close();
-          } catch (IOException e) {
-            LOG.error("Close WAL Iterator", e);
-          }
-        }
-        return next;
-      }
-
-      @Override
-      public WALItem next() {
-        Result next = it.next();
-        List<Cell> cells = next.listCells();
-        byte[] buf = cells.get(0).getValueArray();
-        int len = cells.get(0).getValueLength();
-        int offset = cells.get(0).getValueOffset();
-        String backupId = new String(buf, offset, len);
-        buf = cells.get(1).getValueArray();
-        len = cells.get(1).getValueLength();
-        offset = cells.get(1).getValueOffset();
-        String walFile = new String(buf, offset, len);
-        buf = cells.get(2).getValueArray();
-        len = cells.get(2).getValueLength();
-        offset = cells.get(2).getValueOffset();
-        String backupRoot = new String(buf, offset, len);
-        return new WALItem(backupId, walFile, backupRoot);
-      }
-
-      @Override
-      public void remove() {
-        // not implemented
-        throw new RuntimeException("remove is not supported");
-      }
-    };
-
-  }
-
-  /**
-   * Checks if a WAL file is eligible for deletion. Future: support all backup destinations.
-   * @param file file
-   * @return true if the file has already been backed up
-   * @throws IOException exception
-   */
-  public boolean isWALFileDeletable(String file) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Check if WAL file has been already backed up in hbase:backup " + file);
-    }
-    try (Table table = connection.getTable(tableName)) {
-      Get get = BackupSystemTableHelper.createGetForCheckWALFile(file);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        return false;
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Checks if we have at least one backup session in hbase:backup. This API is used by
-   * BackupLogCleaner.
-   * @return true if at least one session exists in the hbase:backup table
-   * @throws IOException exception
-   */
-  public boolean hasBackupSessions() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Has backup sessions from hbase:backup");
-    }
-    boolean result = false;
-    Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
-    scan.setCaching(1);
-    try (Table table = connection.getTable(tableName);
-        ResultScanner scanner = table.getScanner(scan)) {
-      if (scanner.next() != null) {
-        result = true;
-      }
-      return result;
-    }
-  }
-
-  /**
-   * BACKUP SETS
-   */
-
-  /**
-   * Get backup set list
-   * @return backup set list
-   * @throws IOException
-   */
-  public List<String> listBackupSets() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set list");
-    }
-    List<String> list = new ArrayList<String>();
-    Table table = null;
-    ResultScanner scanner = null;
-    try {
-      table = connection.getTable(tableName);
-      Scan scan = BackupSystemTableHelper.createScanForBackupSetList();
-      scan.setMaxVersions(1);
-      scanner = table.getScanner(scan);
-      Result res = null;
-      while ((res = scanner.next()) != null) {
-        res.advance();
-        list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current()));
-      }
-      return list;
-    } finally {
-      if (scanner != null) {
-        scanner.close();
-      }
-      if (table != null) {
-        table.close();
-      }
-    }
-  }
-
-  /**
-   * Get backup set description (list of tables)
-   * @param name - set's name
-   * @return list of tables in a backup set
-   * @throws IOException
-   */
-  public List<TableName> describeBackupSet(String name) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set describe: " + name);
-    }
-    Table table = null;
-    try {
-      table = connection.getTable(tableName);
-      Get get = BackupSystemTableHelper.createGetForBackupSet(name);
-      Result res = table.get(get);
-      if (res.isEmpty()) return null;
-      res.advance();
-      String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current());
-      return toList(tables);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
-    }
-  }
-
-  private List<TableName> toList(String[] tables) {
-    List<TableName> list = new ArrayList<TableName>(tables.length);
-    for (String name : tables) {
-      list.add(TableName.valueOf(name));
-    }
-    return list;
-  }
-
-  /**
-   * Adds tables to a backup set, creating the set if it does not exist
-   * @param name - set name
-   * @param newTables - tables to add
-   * @throws IOException
-   */
-  public void addToBackupSet(String name, String[] newTables) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup set add: " + name + " tables [" + StringUtils.join(newTables, " ") + "]");
-    }
-    Table table = null;
-    String[] union = null;
-    try {
-      table = connection.getTable(tableName);
-      Get get = BackupSystemTableHelper.createGetForBackupSet(name);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        union = newTables;
-      } else {
-        res.advance();
-        String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current());
-        union = merge(tables, newTables);
-      }
-      Put put = BackupSystemTableHelper.createPutForBackupSet(name, union);
-      table.put(put);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
-    }
-  }
-
-  private String[] merge(String[] tables, String[] newTables) {
-    List<String> list = new ArrayList<String>();
-    // Add all from tables
-    for (String t : tables) {
-      list.add(t);
-    }
-    for (String nt : newTables) {
-      if (list.contains(nt)) continue;
-      list.add(nt);
-    }
-    String[] arr = new String[list.size()];
-    list.toArray(arr);
-    return arr;
-  }
-
-  /**
-   * Removes tables from a backup set
-   * @param name - set name
-   * @param toRemove - tables to remove
-   * @throws IOException
-   */
-  public void removeFromBackupSet(String name, String[] toRemove) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ")
-          + "]");
-    }
-    Table table = null;
-    String[] disjoint = null;
-    try {
-      table = connection.getTable(tableName);
-      Get get = BackupSystemTableHelper.createGetForBackupSet(name);
-      Result res = table.get(get);
-      if (res.isEmpty()) {
-        LOG.warn("Backup set '" + name + "' not found.");
-        return;
-      } else {
-        res.advance();
-        String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current());
-        disjoint = disjoin(tables, toRemove);
-      }
-      if (disjoint.length > 0) {
-        Put put = BackupSystemTableHelper.createPutForBackupSet(name, disjoint);
-        table.put(put);
-      } else {
-        // Delete
-        // describeBackupSet(name);
-        LOG.warn("Backup set '" + name + "' does not contain tables ["
-            + StringUtils.join(toRemove, " ") + "]");
-      }
-    } finally {
-      if (table != null) {
-        table.close();
-      }
-    }
-  }
-
-  private String[] disjoin(String[] tables, String[] toRemove) {
-    List<String> list = new ArrayList<String>();
-    // Add all from tables
-    for (String t : tables) {
-      list.add(t);
-    }
-    for (String nt : toRemove) {
-      if (list.contains(nt)) {
-        list.remove(nt);
-      }
-    }
-    String[] arr = new String[list.size()];
-    list.toArray(arr);
-    return arr;
-  }
-
-  /**
-   * Delete backup set
-   * @param name set's name
-   * @throws IOException
-   */
-  public void deleteBackupSet(String name) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set delete: " + name);
-    }
-    Table table = null;
-    try {
-      table = connection.getTable(tableName);
-      Delete del = BackupSystemTableHelper.createDeleteForBackupSet(name);
-      table.delete(del);
-    } finally {
-      if (table != null) {
-        table.close();
-      }
-    }
-  }
-
-  /**
-   * Get backup system table descriptor
-   * @return descriptor
-   */
-  public static HTableDescriptor getSystemTableDescriptor() {
-    HTableDescriptor tableDesc = new HTableDescriptor(tableName);
-    HColumnDescriptor colSessionsDesc = new HColumnDescriptor(SESSIONS_FAMILY);
-    colSessionsDesc.setMaxVersions(1);
-    // Time to keep backup sessions (secs)
-    Configuration config = HBaseConfiguration.create();
-    int ttl = config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT);
-    colSessionsDesc.setTimeToLive(ttl);
-    tableDesc.addFamily(colSessionsDesc);
-    HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY);
-    // colDesc.setMaxVersions(1);
-    tableDesc.addFamily(colMetaDesc);
-    return tableDesc;
-  }
-
-  public static String getTableNameAsString() {
-    return tableName.getNameAsString();
-  }
-
-  public static TableName getTableName() {
-    return tableName;
-  }
-}

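For reference, BackupSystemTable above was the single access point to the hbase:backup table. A minimal sketch of listing completed sessions with it, assuming the pre-refactoring classpath; the example class is hypothetical.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListCompletedBackupsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupSystemTable backupTable = new BackupSystemTable(conn)) {
      // true => only successfully completed sessions, newest first.
      List<BackupInfo> history = backupTable.getBackupHistory(true);
      for (BackupInfo info : history) {
        System.out.println(info.getBackupId() + " " + info.getState());
      }
    }
  }
}

Note that BackupSystemTable shares the caller's Connection and its close() is a no-op, so closing it in try-with-resources does not tear down the underlying connection.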
http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
deleted file mode 100644
index 37f29f8..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
+++ /dev/null
@@ -1,433 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-/**
- * A collection of methods used by BackupSystemTable.
- */
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupSystemTableHelper {
-
-  /**
-   * hbase:backup schema: 
-   * 1. Backup sessions rowkey= "session:" + backupId; value = serialized BackupContext 
-   * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode 
-   * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] 
-   * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> last WAL
-   * timestamp] 
-   * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp
-   * 6. WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name
-   */
-
-  private final static String BACKUP_INFO_PREFIX = "session:";
-  private final static String START_CODE_ROW = "startcode:";
-  private final static String INCR_BACKUP_SET = "incrbackupset:";
-  private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:";
-  private final static String RS_LOG_TS_PREFIX = "rslogts:";
-  private final static String WALS_PREFIX = "wals:";
-  private final static String SET_KEY_PREFIX = "backupset:";
-
-  private final static byte[] EMPTY_VALUE = new byte[] {};
-
-  // Safe delimiter in a string
-  private final static String NULL = "\u0000";
-
-  private BackupSystemTableHelper() {
-    throw new AssertionError("Instantiating utility class...");
-  }
-
-  /**
-   * Creates Put operation for a given backup context object
-   * @param context backup context
-   * @return put operation
-   * @throws IOException exception
-   */
-  static Put createPutForBackupContext(BackupInfo context) throws IOException {
-    Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId()));
-    put.addColumn(BackupSystemTable.SESSIONS_FAMILY, "context".getBytes(), context.toByteArray());
-    return put;
-  }
-
-  /**
-   * Creates Get operation for a given backup id
-   * @param backupId - backup's ID
-   * @return get operation
-   * @throws IOException exception
-   */
-  static Get createGetForBackupContext(String backupId) throws IOException {
-    Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId));
-    get.addFamily(BackupSystemTable.SESSIONS_FAMILY);
-    get.setMaxVersions(1);
-    return get;
-  }
-
-  /**
-   * Creates Delete operation for a given backup id
-   * @param backupId - backup's ID
-   * @return delete operation
-   */
-  public static Delete createDeleteForBackupInfo(String backupId) {
-    Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId));
-    del.addFamily(BackupSystemTable.SESSIONS_FAMILY);
-    return del;
-  }
-
-  /**
-   * Converts Result to BackupContext
-   * @param res - HBase result
-   * @return backup context instance
-   * @throws IOException exception
-   */
-  static BackupInfo resultToBackupInfo(Result res) throws IOException {
-    res.advance();
-    Cell cell = res.current();
-    return cellToBackupInfo(cell);
-  }
-
-  /**
-   * Creates Get operation to retrieve start code from hbase:backup
-   * @return get operation
-   * @throws IOException exception
-   */
-  static Get createGetForStartCode(String rootPath) throws IOException {
-    Get get = new Get(rowkey(START_CODE_ROW, rootPath));
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    get.setMaxVersions(1);
-    return get;
-  }
-
-  /**
-   * Creates Put operation to store start code to hbase:backup
-   * @return put operation
-   */
-  static Put createPutForStartCode(String startCode, String rootPath) {
-    Put put = new Put(rowkey(START_CODE_ROW, rootPath));
-    put.addColumn(BackupSystemTable.META_FAMILY, "startcode".getBytes(), startCode.getBytes());
-    return put;
-  }
-
-  /**
-   * Creates Get to retrieve incremental backup table set from hbase:backup
-   * @return get operation
-   * @throws IOException exception
-   */
-  static Get createGetForIncrBackupTableSet(String backupRoot) throws IOException {
-    Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot));
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    get.setMaxVersions(1);
-    return get;
-  }
-
-  /**
-   * Creates Put to store incremental backup table set
-   * @param tables tables
-   * @return put operation
-   */
-  static Put createPutForIncrBackupTableSet(Set<TableName> tables, String backupRoot) {
-    Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot));
-    for (TableName table : tables) {
-      put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()),
-        EMPTY_VALUE);
-    }
-    return put;
-  }
-
-  /**
-   * Creates Delete for incremental backup table set
-   * @param backupRoot backup root
-   * @return delete operation
-   */
-  static Delete createDeleteForIncrBackupTableSet(String backupRoot) {
-    Delete delete = new Delete(rowkey(INCR_BACKUP_SET, backupRoot));
-    delete.addFamily(BackupSystemTable.META_FAMILY);
-    return delete;
-  }
-
-  /**
-   * Creates Scan operation to load backup history
-   * @return scan operation
-   */
-  static Scan createScanForBackupHistory() {
-    Scan scan = new Scan();
-    byte[] startRow = BACKUP_INFO_PREFIX.getBytes();
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.SESSIONS_FAMILY);
-    scan.setMaxVersions(1);
-    return scan;
-  }
-
-  /**
-   * Converts cell to backup context instance.
-   * @param current - cell
-   * @return backup context instance
-   * @throws IOException exception
-   */
-  static BackupInfo cellToBackupInfo(Cell current) throws IOException {
-    byte[] data = CellUtil.cloneValue(current);
-    return BackupInfo.fromByteArray(data);
-  }
-
-  /**
-   * Creates Put to write RS last roll log timestamp map
-   * @param table - table
-   * @param smap - map, containing RS:ts
-   * @return put operation
-   */
-  static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap,
-      String backupRoot) {
-    Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString()));
-    put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap);
-    return put;
-  }
-
-  /**
-   * Creates Scan to load table-> { RS -> ts} map of maps
-   * @return scan operation
-   */
-  static Scan createScanForReadLogTimestampMap(String backupRoot) {
-    Scan scan = new Scan();
-    byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot);
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-
-    return scan;
-  }
-
-  /**
-   * Get table name from rowkey
-   * @param cloneRow rowkey
-   * @return table name
-   */
-  static String getTableNameForReadLogTimestampMap(byte[] cloneRow) {
-    String s = new String(cloneRow);
-    int index = s.lastIndexOf(NULL);
-    return s.substring(index + 1);
-  }
-
-  /**
-   * Creates Put to store RS last log result
-   * @param server - server name
-   * @param timestamp - log roll result (timestamp)
-   * @return put operation
-   */
-  static Put createPutForRegionServerLastLogRollResult(String server, Long timestamp,
-      String backupRoot) {
-    Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server));
-    put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), timestamp.toString()
-        .getBytes());
-    return put;
-  }
-
-  /**
-   * Creates Scan operation to load last RS log roll results
-   * @return scan operation
-   */
-  static Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) {
-    Scan scan = new Scan();
-    byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot);
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-    scan.setMaxVersions(1);
-
-    return scan;
-  }
-
-  /**
-   * Get server's name from rowkey
-   * @param row - rowkey
-   * @return server's name
-   */
-  static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) {
-    String s = new String(row);
-    int index = s.lastIndexOf(NULL);
-    return s.substring(index + 1);
-  }
-
-  /**
-   * Creates put list for list of WAL files
-   * @param files list of WAL file paths
-   * @param backupId backup id
-   * @return put list
-   * @throws IOException exception
-   */
-  public static List<Put> createPutsForAddWALFiles(List<String> files, String backupId,
-      String backupRoot) throws IOException {
-
-    List<Put> puts = new ArrayList<Put>();
-    for (String file : files) {
-      Put put = new Put(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file)));
-      put.addColumn(BackupSystemTable.META_FAMILY, "backupId".getBytes(), backupId.getBytes());
-      put.addColumn(BackupSystemTable.META_FAMILY, "file".getBytes(), file.getBytes());
-      put.addColumn(BackupSystemTable.META_FAMILY, "root".getBytes(), backupRoot.getBytes());
-      puts.add(put);
-    }
-    return puts;
-  }
-
-  /**
-   * Creates Scan operation to load WALs. TODO: support for backupRoot
-   * @param backupRoot - path to backup destination
-   * @return scan operation
-   */
-  public static Scan createScanForGetWALs(String backupRoot) {
-    Scan scan = new Scan();
-    byte[] startRow = WALS_PREFIX.getBytes();
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-    return scan;
-  }
-
-  /**
-   * Creates Get operation for a given WAL file name. TODO: support for backup destination
-   * @param file file
-   * @return get operation
-   * @throws IOException exception
-   */
-  public static Get createGetForCheckWALFile(String file) throws IOException {
-    Get get = new Get(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file)));
-    // add backup root column
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    return get;
-  }
-
-  /**
-   * Creates Scan operation to load backup set list
-   * @return scan operation
-   */
-  static Scan createScanForBackupSetList() {
-    Scan scan = new Scan();
-    byte[] startRow = SET_KEY_PREFIX.getBytes();
-    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
-    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
-    scan.setStartRow(startRow);
-    scan.setStopRow(stopRow);
-    scan.addFamily(BackupSystemTable.META_FAMILY);
-    return scan;
-  }
-
-  /**
-   * Creates Get operation to load backup set content
-   * @return get operation
-   */
-  static Get createGetForBackupSet(String name) {
-    Get get = new Get(rowkey(SET_KEY_PREFIX, name));
-    get.addFamily(BackupSystemTable.META_FAMILY);
-    return get;
-  }
-
-  /**
-   * Creates Delete operation to delete backup set content
-   * @param name - backup set's name
-   * @return delete operation
-   */
-  static Delete createDeleteForBackupSet(String name) {
-    Delete del = new Delete(rowkey(SET_KEY_PREFIX, name));
-    del.addFamily(BackupSystemTable.META_FAMILY);
-    return del;
-  }
-
-  /**
-   * Creates Put operation to update backup set content
-   * @param name - backup set's name
-   * @param tables - list of tables
-   * @return put operation
-   */
-  static Put createPutForBackupSet(String name, String[] tables) {
-    Put put = new Put(rowkey(SET_KEY_PREFIX, name));
-    byte[] value = convertToByteArray(tables);
-    put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value);
-    return put;
-  }
-
-  private static byte[] convertToByteArray(String[] tables) {
-    return StringUtils.join(tables, ",").getBytes();
-  }
-
-  /**
-   * Converts cell to backup set list.
-   * @param current - cell
-   * @return backup set
-   * @throws IOException
-   */
-  static String[] cellValueToBackupSet(Cell current) throws IOException {
-    byte[] data = CellUtil.cloneValue(current);
-    if (data != null && data.length > 0) {
-      return new String(data).split(",");
-    } else {
-      return new String[0];
-    }
-  }
-
-  /**
-   * Converts cell key to backup set name.
-   * @param current - cell
-   * @return backup set name
-   * @throws IOException
-   */
-  static String cellKeyToBackupSetName(Cell current) throws IOException {
-    byte[] data = CellUtil.cloneRow(current);
-    return new String(data).substring(SET_KEY_PREFIX.length());
-  }
-
-  static byte[] rowkey(String s, String... other) {
-    StringBuilder sb = new StringBuilder(s);
-    for (String ss : other) {
-      sb.append(ss);
-    }
-    return sb.toString().getBytes();
-  }
-
-}

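All of the helper's Scans bound a rowkey-prefix range the same way: copy the prefix as the start row and increment its last byte to get an exclusive stop row. A standalone sketch of that pattern (class name hypothetical):

import java.util.Arrays;

public class PrefixScanBoundSketch {
  // Exclusive stop row for a prefix scan: copy the prefix and bump its final byte,
  // so the scan covers exactly the rows beginning with the prefix.
  static byte[] stopRowForPrefix(byte[] startRow) {
    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
    return stopRow;
  }

  public static void main(String[] args) {
    // ':' (0x3A) + 1 = ';' (0x3B), so a "session:" scan stops before "session;".
    System.out.println(new String(stopRowForPrefix("session:".getBytes())));
  }
}

This works because the prefixes used here end in ASCII characters below 0xFF; a general implementation would also have to handle a trailing 0xFF byte by carrying into the preceding byte.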
http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java
deleted file mode 100644
index c22f51b..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java
+++ /dev/null
@@ -1,437 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.util;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest;
-import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * A collection of methods used by multiple classes to back up HBase tables.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupClientUtil {
-  protected static final Log LOG = LogFactory.getLog(BackupClientUtil.class);
-  public static final String LOGNAME_SEPARATOR = ".";
-
-  private BackupClientUtil() {
-    throw new AssertionError("Instantiating utility class...");
-  }
-
-  /**
-   * Checks whether the backup path exists
-   * @param backupStr backup path
-   * @param conf configuration
-   * @return true if the path exists
-   * @throws IOException exception
-   */
-  public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
-    boolean isExist = false;
-    Path backupPath = new Path(backupStr);
-    FileSystem fileSys = backupPath.getFileSystem(conf);
-    String targetFsScheme = fileSys.getUri().getScheme();
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Schema of given url: " + backupStr + " is: " + targetFsScheme);
-    }
-    if (fileSys.exists(backupPath)) {
-      isExist = true;
-    }
-    return isExist;
-  }
-
-  // check target path first, confirm it doesn't exist before backup
-  public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
-    boolean targetExists = false;
-    try {
-      targetExists = checkPathExist(backupRootPath, conf);
-    } catch (IOException e) {
-      String expMsg = e.getMessage();
-      String newMsg = null;
-      if (expMsg.contains("No FileSystem for scheme")) {
-        newMsg =
-            "Unsupported filesystem scheme found in the backup target url. Error Message: "
-                + expMsg;
-        LOG.error(newMsg);
-        throw new IOException(newMsg);
-      } else {
-        throw e;
-      }
-    }
-
-    if (targetExists) {
-      LOG.info("Using existing backup root dir: " + backupRootPath);
-    } else {
-      LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
-    }
-  }
-
-  /**
-   * Gets the minimum value among all the values in a map.
-   * @param map map
-   * @return the min value
-   */
-  public static <T> Long getMinValue(HashMap<T, Long> map) {
-    Long minTimestamp = null;
-    if (map != null) {
-      ArrayList<Long> timestampList = new ArrayList<Long>(map.values());
-      Collections.sort(timestampList);
-      // The min among all the RS log timestamps will be kept in hbase:backup table.
-      minTimestamp = timestampList.get(0);
-    }
-    return minTimestamp;
-  }
-
-  /**
-   * Parses host name and port from an archived WAL path
-   * @param p path
-   * @return host:port string, or null if the path cannot be parsed
-   */
-  public static String parseHostFromOldLog(Path p) {
-    try {
-      String n = p.getName();
-      int idx = n.lastIndexOf(LOGNAME_SEPARATOR);
-      String s = URLDecoder.decode(n.substring(0, idx), "UTF8");
-      return ServerName.parseHostname(s) + ":" + ServerName.parsePort(s);
-    } catch (Exception e) {
-      LOG.warn("Skip log file (can't parse): " + p);
-      return null;
-    }
-  }
-
-  /**
-   * Given the log file, parse the timestamp from the file name. The timestamp is the last number.
-   * @param p a path to the log file
-   * @return the timestamp
-   * @throws IOException exception
-   */
-  public static Long getCreationTime(Path p) throws IOException {
-    int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR);
-    if (idx < 0) {
-      throw new IOException("Cannot parse timestamp from path " + p);
-    }
-    String ts = p.getName().substring(idx + 1);
-    return Long.parseLong(ts);
-  }
-
-  public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
-      PathFilter filter) throws FileNotFoundException, IOException {
-    RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
-
-    while (it.hasNext()) {
-      LocatedFileStatus lfs = it.next();
-      if (lfs.isDirectory()) {
-        continue;
-      }
-      // apply filter
-      if (filter.accept(lfs.getPath())) {
-        files.add(lfs.getPath().toString());
-      }
-    }
-    return files;
-  }
-
-  public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException {
-    cleanupHLogDir(context, conf);
-    cleanupTargetDir(context, conf);
-  }
-
-  /**
-   * Cleans up directories generated when DistCp copies hlogs.
-   * @throws IOException
-   */
-  private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf)
-      throws IOException {
-
-    String logDir = backupContext.getHLogTargetDir();
-    if (logDir == null) {
-      LOG.warn("No log directory specified for " + backupContext.getBackupId());
-      return;
-    }
-
-    Path rootPath = new Path(logDir).getParent();
-    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
-    FileStatus[] files = listStatus(fs, rootPath, null);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      LOG.debug("Delete log files: " + file.getPath().getName());
-      fs.delete(file.getPath(), true);
-    }
-  }
-
-  /**
-   * Clean up the data at target directory
-   */
-  private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
-    try {
-      // clean up the data at target directory
-      LOG.debug("Trying to cleanup up target dir : " + backupInfo.getBackupId());
-      String targetDir = backupInfo.getTargetRootDir();
-      if (targetDir == null) {
-        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
-        return;
-      }
-
-      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf);
-
-      for (TableName table : backupInfo.getTables()) {
-        Path targetDirPath =
-            new Path(getTableBackupDir(backupInfo.getTargetRootDir(), backupInfo.getBackupId(),
-              table));
-        if (outputFs.delete(targetDirPath, true)) {
-          LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
-        } else {
-          LOG.info("No data has been found in " + targetDirPath.toString() + ".");
-        }
-
-        Path tableDir = targetDirPath.getParent();
-        FileStatus[] backups = listStatus(outputFs, tableDir, null);
-        if (backups == null || backups.length == 0) {
-          outputFs.delete(tableDir, true);
-          LOG.debug(tableDir.toString() + " is empty, removing it.");
-        }
-      }
-      outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
-    } catch (IOException e1) {
-      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "
-          + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
-    }
-  }
-
-  /**
-   * Given the backup root dir, backup id and the table name, return the backup image location,
-   * which is also where the backup manifest file is. The return value looks like:
-   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
-   * @param backupRootDir backup root directory
-   * @param backupId backup id
-   * @param tableName table name
-   * @return the backup path String for the particular table
-   */
-  public static String
-      getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
-    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
-        + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
-        + Path.SEPARATOR;
-  }
-
-  public static TableName[] parseTableNames(String tables) {
-    if (tables == null) {
-      return null;
-    }
-    String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
-
-    TableName[] ret = new TableName[tableArray.length];
-    for (int i = 0; i < tableArray.length; i++) {
-      ret[i] = TableName.valueOf(tableArray[i]);
-    }
-    return ret;
-  }
-
-  /**
-   * Sort history list by start time in descending order.
-   * @param historyList history list
-   * @return sorted list of BackupInfo
-   */
-  public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
-    // Key on the numeric start time; keying on its String form would sort lexicographically.
-    TreeMap<Long, BackupInfo> map = new TreeMap<Long, BackupInfo>();
-    for (BackupInfo h : historyList) {
-      map.put(h.getStartTs(), h);
-    }
-    Iterator<Long> i = map.descendingKeySet().iterator();
-    while (i.hasNext()) {
-      list.add(map.get(i.next()));
-    }
-    return list;
-  }
-
-  /**
-   * Returns the unique file-name part of a WAL path.
-   * @param walFileName WAL file path as a String
-   * @return the WAL file name without its directory
-   * @throws IOException exception
-   * @throws IllegalArgumentException if the path String is malformed
-   */
-  public static String getUniqueWALFileNamePart(String walFileName) throws IOException {
-    return getUniqueWALFileNamePart(new Path(walFileName));
-  }
-
-  /**
-   * Returns the unique file-name part of a WAL path.
-   * @param p WAL file path
-   * @return the WAL file name without its directory
-   * @throws IOException exception
-   */
-  public static String getUniqueWALFileNamePart(Path p) throws IOException {
-    return p.getName();
-  }
-
-  /**
-   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates
-   * differences between Hadoop versions: Hadoop 1 does not throw a FileNotFoundException and
-   * returns an empty FileStatus[], while Hadoop 2 throws FileNotFoundException.
-   * @param fs file system
-   * @param dir directory
-   * @param filter path filter
-   * @return null if dir is empty or doesn't exist, otherwise FileStatus array
-   */
-  public static FileStatus[]
-      listStatus(final FileSystem fs, final Path dir, final PathFilter filter) throws IOException {
-    FileStatus[] status = null;
-    try {
-      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
-    } catch (FileNotFoundException fnfe) {
-      // if directory doesn't exist, return null
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(dir + " doesn't exist");
-      }
-    }
-    if (status == null || status.length < 1) return null;
-    return status;
-  }
-
-  /**
-   * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the
-   * 'path' component of a Path's URI: e.g. if a Path is
-   * <code>hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir</code>, this method returns
-   * <code>/hbase_trunk/TestTable/compaction.dir</code>. This method is useful if you want to print
-   * out a Path without the qualifying Filesystem instance.
-   * @param p Filesystem Path whose 'path' component we are to return.
-   * @return the 'path' portion of the Path's URI
-   */
-  public static String getPath(Path p) {
-    return p.toUri().getPath();
-  }
-
-  /**
-   * Given the backup root dir and the backup id, return the log file location for an incremental
-   * backup.
-   * @param backupRootDir backup root directory
-   * @param backupId backup id
-   * @return logBackupDir: ".../user/biadmin/backup1/backup_1396650096738/WALs"
-   */
-  public static String getLogBackupDir(String backupRootDir, String backupId) {
-    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
-        + HConstants.HREGION_LOGDIR_NAME;
-  }
-
-  private static List<BackupInfo> getHistory(Configuration conf, Path backupRootPath)
-      throws IOException {
-    // Get all backup history from the backup root destination
-    FileSystem fs = FileSystem.get(conf);
-    RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(backupRootPath);
-
-    List<BackupInfo> infos = new ArrayList<BackupInfo>();
-    while (it.hasNext()) {
-      LocatedFileStatus lfs = it.next();
-      if (!lfs.isDirectory()) continue;
-      String backupId = lfs.getPath().getName();
-      try {
-        BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs);
-        infos.add(info);
-      } catch (IOException e) {
-        LOG.error("Cannot load backup info from: " + lfs.getPath(), e);
-      }
-    }
-    // Sort
-    Collections.sort(infos, new Comparator<BackupInfo>() {
-
-      @Override
-      public int compare(BackupInfo o1, BackupInfo o2) {
-        long ts1 = getTimestamp(o1.getBackupId());
-        long ts2 = getTimestamp(o2.getBackupId());
-        if (ts1 == ts2) return 0;
-        return ts1 < ts2 ? 1 : -1;
-      }
-
-      private long getTimestamp(String backupId) {
-        String[] split = backupId.split("_");
-        return Long.parseLong(split[1]);
-      }
-    });
-    return infos;
-  }
-
-  public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
-      BackupInfo.Filter... filters) throws IOException {
-    List<BackupInfo> infos = getHistory(conf, backupRootPath);
-    List<BackupInfo> ret = new ArrayList<BackupInfo>();
-    for (BackupInfo info : infos) {
-      if (ret.size() == n) {
-        break;
-      }
-      boolean passed = true;
-      for (int i = 0; i < filters.length; i++) {
-        if (!filters[i].apply(info)) {
-          passed = false;
-          break;
-        }
-      }
-      if (passed) {
-        ret.add(info);
-      }
-    }
-    return ret;
-  }
-  
-  public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs)
-      throws IOException {
-    Path backupPath = new Path(backupRootPath, backupId);
-
-    RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupPath, true);
-    while (it.hasNext()) {
-      LocatedFileStatus lfs = it.next();
-      if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) {
-        // Load BackupManifest
-        BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent());
-        BackupInfo info = manifest.toBackupInfo();
-        return info;
-      }
-    }
-    return null;
-  }
-}
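
For orientation, here is a minimal sketch of how the helpers deleted above combine: parsing host:port and the creation timestamp out of an archived WAL name, then listing filtered backup history. It assumes the helpers live on the BackupClientUtil class whose import this commit removes from HBaseAdmin further down; the WAL name, the "." separator before the timestamp, the backup root, and the filter are illustrative assumptions.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.util.BackupClientUtil;

public class BackupUtilSketch {
  public static void main(String[] args) throws IOException {
    // Illustrative archived WAL name: URL-encoded ServerName, then the creation timestamp.
    Path oldLog =
        new Path("/hbase/oldWALs/host-1.example.com%2C16020%2C1396650096738.1396650096738");
    String hostPort = BackupClientUtil.parseHostFromOldLog(oldLog); // "host-1.example.com:16020"
    Long created = BackupClientUtil.getCreationTime(oldLog);        // 1396650096738

    // Ten most recent sessions under an illustrative backup root, keeping only ids
    // with the standard "backup_" prefix; a session must pass every supplied filter.
    Configuration conf = new Configuration();
    Path backupRoot = new Path("hdfs://backup.hbase.org:9000/user/biadmin/backup1");
    List<BackupInfo> recent = BackupClientUtil.getHistory(conf, 10, backupRoot,
        new BackupInfo.Filter() {
          @Override
          public boolean apply(BackupInfo info) {
            return info.getBackupId().startsWith("backup_");
          }
        });
    System.out.println(hostPort + " @ " + created + "; " + recent.size() + " sessions");
  }
}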

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java
deleted file mode 100644
index 76402c7..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.util;
-import java.util.List;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-/**
- * A backup set is a named group of HBase tables managed together by the
- * Backup/Restore framework. Instead of listing tables in a backup or restore
- * operation, one can refer to the set by name.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class BackupSet {
-  private final String name;
-  private final List<TableName> tables;
-
-  public BackupSet(String name, List<TableName> tables) {
-    this.name = name;
-    this.tables = tables;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public List<TableName> getTables() {
-    return tables;
-  }
-
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append(name).append("={");
-    for (int i = 0; i < tables.size(); i++) {
-      sb.append(tables.get(i));
-      if (i < tables.size() - 1) {
-        sb.append(",");
-      }
-    }
-    sb.append("}");
-    return sb.toString();
-  }
-
-}
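
A quick usage sketch of the BackupSet value class deleted above; the set name and tables are hypothetical.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupSet;

public class BackupSetSketch {
  public static void main(String[] args) {
    // Hypothetical tables grouped under one named set.
    List<TableName> tables = Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2"));
    BackupSet nightly = new BackupSet("nightly", tables);

    System.out.println(nightly.getName()); // nightly
    System.out.println(nightly);           // nightly={t1,t2}, per toString() above
  }
}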

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 343dad4..17d5e78 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -37,8 +37,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
@@ -1696,13 +1694,6 @@ public interface Admin extends Abortable, Closeable {
    * @return true if the switch is enabled, false otherwise.
    */
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
-
-  /**
-   * Get Backup Admin interface 
-   * @return backup admin object
-   * @throws IOException exception
-   */
-  BackupAdmin getBackupAdmin() throws IOException;
   
   /**
    * Currently, there are only two compact types:

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java
deleted file mode 100644
index 2e5ca2a..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.client;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.Future;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
-import org.apache.hadoop.hbase.backup.util.BackupSet;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-/**
- * The administrative API for HBase Backup. Obtain an instance from 
- * an {@link Admin#getBackupAdmin()} and call {@link #close()} afterwards.
- * <p>BackupAdmin can be used to create backups, restore data from backups and for 
- * other backup-related operations. 
- *
- * @see Admin
- * @since 2.0
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public interface BackupAdmin extends Closeable {
-
-  /**
-   * Backs up the given list of tables. Synchronous operation.
-   *
-   * @param userRequest BackupRequest instance which contains the following members:
-   *  type whether the backup is full or incremental
-   *  tableList list of tables to back up
-   *  targetRootDir root directory for saving the backup
-   *  workers number of parallel workers. -1 - system defined
-   *  bandwidth bandwidth per worker in MB per second. -1 - unlimited
-   * @return the backup id
-   */
-  public String backupTables(final BackupRequest userRequest) throws IOException;
-  
-  /**
-   * Backs up the given list of tables. Asynchronous operation.
-   *
-   * @param userRequest BackupRequest instance which contains the following members:
-   *  type whether the backup is full or incremental
-   *  tableList list of tables to back up
-   *  targetRootDir root dir for saving the backup
-   *  workers number of parallel workers. -1 - system defined
-   *  bandwidth bandwidth per worker in MB per sec. -1 - unlimited
-   * @return the backup id future
-   */
-  public Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException;
-
-  /**
-   * Restore backup. Synchronous operation.
-   * @param request - restore request
-   * @throws IOException exception
-   */
-  public void restore(RestoreRequest request) throws IOException;
-
-  /**
-   * Restore backup. Asynchronous operation.
-   * @param request - restore request
-   * @return Future which client can wait on
-   * @throws IOException exception
-   */
-  public Future<Void> restoreAsync(RestoreRequest request) throws IOException;
-
-  /**
-   * Describe backup image command
-   * @param backupId - backup id
-   * @return backup info
-   * @throws IOException exception
-   */
-  public BackupInfo getBackupInfo(String backupId) throws IOException;
-
-  /**
-   * Show backup progress command
-   * @param backupId - backup id (may be null)
-   * @return backup progress (0-100%), -1 if no active sessions
-   *  or session not found
-   * @throws IOException exception
-   */
-  public int getProgress(String backupId) throws IOException;
-
-  /**
-   * Delete backup image command
-   * @param backupIds - array of backup ids
-   * @return total number of deleted sessions
-   * @throws IOException exception
-   */
-  public int deleteBackups(String[] backupIds) throws IOException;
-
-  /**
-   * Show backup history command
-   * @param n - number of most recent backup sessions to return
-   * @return list of backup infos
-   * @throws IOException exception
-   */
-  public List<BackupInfo> getHistory(int n) throws IOException;
-
-  /**
-   * Show backup history command with filters
-   * @param n - number of most recent backup sessions to return
-   * @param f - list of filters
-   * @return list of backup infos
-   * @throws IOException exception
-   */
-  public List<BackupInfo> getHistory(int n, BackupInfo.Filter ... f) throws IOException;
-
-  
-  /**
-   * Backup sets list command - list all backup sets. A backup set is
-   * a named group of tables.
-   * @return all registered backup sets
-   * @throws IOException exception
-   */
-  public List<BackupSet> listBackupSets() throws IOException;
-
-  /**
-   * Backup set describe command. Shows list of tables in
-   * this particular backup set.
-   * @param name set name
-   * @return backup set description or null
-   * @throws IOException exception
-   */
-  public BackupSet getBackupSet(String name) throws IOException;
-
-  /**
-   * Delete backup set command
-   * @param name - backup set name
-   * @return true if the set was deleted, false otherwise
-   * @throws IOException exception
-   */
-  public boolean deleteBackupSet(String name) throws IOException;
-
-  /**
-   * Add tables to backup set command
-   * @param name - name of backup set.
-   * @param tables - list of tables to be added to this set.
-   * @throws IOException exception
-   */
-  public void addToBackupSet(String name, TableName[] tables) throws IOException;
-
-  /**
-   * Remove tables from backup set
-   * @param name - name of backup set.
-   * @param tables - list of tables to be removed from this set.
-   * @throws IOException exception
-   */
-  public void removeFromBackupSet(String name, String[] tables) throws IOException;
-}
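
For context, a sketch of what client code looked like against this interface before this commit removed it together with Admin#getBackupAdmin(); the set name and table names are hypothetical.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.util.BackupSet;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BackupAdmin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupAdminSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         BackupAdmin backupAdmin = admin.getBackupAdmin()) {
      // Register two (hypothetical) tables under a named set, so later backup or
      // restore operations can refer to the set instead of repeating the table list.
      backupAdmin.addToBackupSet("nightly",
          new TableName[] { TableName.valueOf("t1"), TableName.valueOf("t2") });
      for (BackupSet set : backupAdmin.listBackupSets()) {
        System.out.println(set);
      }
      // Inspect the five most recent sessions and the progress of the active one, if any.
      List<BackupInfo> history = backupAdmin.getHistory(5);
      int progress = backupAdmin.getProgress(null); // -1 when no session is active
      System.out.println(history.size() + " sessions, progress=" + progress);
    }
  }
}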

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 1ac43f9..c8367b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1400,19 +1400,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
         return stub.listProcedures(controller, request);
       }
 
-      @Override
-      public MasterProtos.BackupTablesResponse backupTables(
-          RpcController controller,
-          MasterProtos.BackupTablesRequest request)  throws ServiceException {
-        return stub.backupTables(controller, request);
-      }
-
-      @Override
-      public MasterProtos.RestoreTablesResponse restoreTables(
-          RpcController controller,
-          MasterProtos.RestoreTablesRequest request)  throws ServiceException {
-        return stub.restoreTables(controller, request);
-      }
 
       @Override
       public MasterProtos.AddColumnResponse addColumn(

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index aa4b3f6..f6ee79a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -61,9 +61,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
-import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
@@ -86,8 +83,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
@@ -149,8 +144,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRespon
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
@@ -219,9 +212,7 @@ public class HBaseAdmin implements Admin {
   // numRetries is for 'normal' stuff... Multiply by this factor when
   // want to wait a long time.
   private final int retryLongerMultiplier;
-  private final int syncWaitTimeout;
-  private final long backupWaitTimeout;
-  private final long restoreWaitTimeout;
+  private final int syncWaitTimeout;
   private boolean aborted;
   private int operationTimeout;
 
@@ -248,10 +239,6 @@ public class HBaseAdmin implements Admin {
         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
     this.syncWaitTimeout = this.conf.getInt(
       "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-    this.backupWaitTimeout = this.conf.getInt(
-      "hbase.client.backup.wait.timeout.sec", 24 * 3600); // 24 h
-    this.restoreWaitTimeout = this.conf.getInt(
-        "hbase.client.restore.wait.timeout.sec", 24 * 3600); // 24 h
     this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
 
     this.ng = this.connection.getNonceGenerator();
@@ -1571,112 +1558,112 @@ public class HBaseAdmin implements Admin {
     ProtobufUtil.split(admin, hri, splitPoint);
   }
 
-  Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException {
-    BackupClientUtil.checkTargetDir(userRequest.getTargetRootDir(), conf);
-    if (userRequest.getTableList() != null) {
-      for (TableName table : userRequest.getTableList()) {
-        if (!tableExists(table)) {
-          throw new DoNotRetryIOException(table + " does not exist");
-        }
-      }
-    }
-    BackupTablesResponse response = executeCallable(
-      new MasterCallable<BackupTablesResponse>(getConnection()) {
-        @Override
-        public BackupTablesResponse call(int callTimeout) throws ServiceException {
-          BackupTablesRequest request = RequestConverter.buildBackupTablesRequest(
-            userRequest.getBackupType(), userRequest.getTableList(), userRequest.getTargetRootDir(),
-            userRequest.getWorkers(), userRequest.getBandwidth(), 
-            userRequest.getBackupSetName(), ng.getNonceGroup(),ng.newNonce());
-          return master.backupTables(null, request);
-        }
-      }, (int) backupWaitTimeout);
-    return new TableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response);
-  }
-
-  String backupTables(final BackupRequest userRequest) throws IOException {
-    return get(
-      backupTablesAsync(userRequest),
-      backupWaitTimeout,
-      TimeUnit.SECONDS);
-  }
-
-  public static class TableBackupFuture extends TableFuture<String> {
-    String backupId;
-    public TableBackupFuture(final HBaseAdmin admin, final TableName tableName,
-        final BackupTablesResponse response) {
-      super(admin, tableName,
-          (response != null && response.hasProcId()) ? response.getProcId() : null);
-      backupId = response.getBackupId();
-    }
-
-    String getBackupId() {
-      return backupId;
-    }
-
-    @Override
-    public String getOperationType() {
-      return "BACKUP";
-    }
-
-    @Override
-    protected String convertResult(final GetProcedureResultResponse response) throws IOException {
-      if (response.hasException()) {
-        throw ForeignExceptionUtil.toIOException(response.getException());
-      }
-      ByteString result = response.getResult();
-      if (result == null) return null;
-      return Bytes.toStringBinary(result.toByteArray());
-    }
-
-    @Override
-    protected String postOperationResult(final String result,
-      final long deadlineTs) throws IOException, TimeoutException {
-      return result;
-    }
-  }
-
-  /**
-   * Restore operation.
-   * @param request RestoreRequest instance
-   * @throws IOException
-   */
-  public Future<Void> restoreTablesAsync(final RestoreRequest userRequest) throws IOException {
-    RestoreTablesResponse response = executeCallable(
-      new MasterCallable<RestoreTablesResponse>(getConnection()) {
-        @Override
-        public RestoreTablesResponse call(int callTimeout) throws ServiceException {
-          try {
-            RestoreTablesRequest request = RequestConverter.buildRestoreTablesRequest(
-                userRequest.getBackupRootDir(), userRequest.getBackupId(),
-                userRequest.isCheck(), userRequest.getFromTables(), userRequest.getToTables(),
-                userRequest.isOverwrite(), ng.getNonceGroup(), ng.newNonce());
-            return master.restoreTables(null, request);
-          } catch (IOException ioe) {
-            throw new ServiceException(ioe);
-          }
-        }
-      });
-    return new TableRestoreFuture(this, TableName.BACKUP_TABLE_NAME, response);
-  }
-
-  public void restoreTables(final RestoreRequest userRequest) throws IOException {
-    get(restoreTablesAsync(userRequest),
-        restoreWaitTimeout, TimeUnit.SECONDS);
-  }
-
-  private static class TableRestoreFuture extends TableFuture<Void> {
-    public TableRestoreFuture(final HBaseAdmin admin, final TableName tableName,
-        final RestoreTablesResponse response) {
-      super(admin, tableName,
-          (response != null) ? response.getProcId() : null);
-    }
-
-    @Override
-    public String getOperationType() {
-      return "RESTORE";
-    }
-  }
+//  Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException {
+//    BackupClientUtil.checkTargetDir(userRequest.getTargetRootDir(), conf);
+//    if (userRequest.getTableList() != null) {
+//      for (TableName table : userRequest.getTableList()) {
+//        if (!tableExists(table)) {
+//          throw new DoNotRetryIOException(table + " does not exist");
+//        }
+//      }
+//    }
+//    BackupTablesResponse response = executeCallable(
+//      new MasterCallable<BackupTablesResponse>(getConnection()) {
+//        @Override
+//        public BackupTablesResponse call(int callTimeout) throws ServiceException {
+//          BackupTablesRequest request = RequestConverter.buildBackupTablesRequest(
+//            userRequest.getBackupType(), userRequest.getTableList(), userRequest.getTargetRootDir(),
+//            userRequest.getWorkers(), userRequest.getBandwidth(), 
+//            userRequest.getBackupSetName(), ng.getNonceGroup(),ng.newNonce());
+//          return master.backupTables(null, request);
+//        }
+//      }, (int) backupWaitTimeout);
+//    return new TableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response);
+//  }
+//
+//  String backupTables(final BackupRequest userRequest) throws IOException {
+//    return get(
+//      backupTablesAsync(userRequest),
+//      backupWaitTimeout,
+//      TimeUnit.SECONDS);
+//  }
+//
+//  public static class TableBackupFuture extends TableFuture<String> {
+//    String backupId;
+//    public TableBackupFuture(final HBaseAdmin admin, final TableName tableName,
+//        final BackupTablesResponse response) {
+//      super(admin, tableName,
+//          (response != null && response.hasProcId()) ? response.getProcId() : null);
+//      backupId = response.getBackupId();
+//    }
+//
+//    String getBackupId() {
+//      return backupId;
+//    }
+//
+//    @Override
+//    public String getOperationType() {
+//      return "BACKUP";
+//    }
+//
+//    @Override
+//    protected String convertResult(final GetProcedureResultResponse response) throws IOException {
+//      if (response.hasException()) {
+//        throw ForeignExceptionUtil.toIOException(response.getException());
+//      }
+//      ByteString result = response.getResult();
+//      if (result == null) return null;
+//      return Bytes.toStringBinary(result.toByteArray());
+//    }
+//
+//    @Override
+//    protected String postOperationResult(final String result,
+//      final long deadlineTs) throws IOException, TimeoutException {
+//      return result;
+//    }
+//  }
+//
+//  /**
+//   * Restore operation.
+//   * @param request RestoreRequest instance
+//   * @throws IOException
+//   */
+//  public Future<Void> restoreTablesAsync(final RestoreRequest userRequest) throws IOException {
+//    RestoreTablesResponse response = executeCallable(
+//      new MasterCallable<RestoreTablesResponse>(getConnection()) {
+//        @Override
+//        public RestoreTablesResponse call(int callTimeout) throws ServiceException {
+//          try {
+//            RestoreTablesRequest request = RequestConverter.buildRestoreTablesRequest(
+//                userRequest.getBackupRootDir(), userRequest.getBackupId(),
+//                userRequest.isCheck(), userRequest.getFromTables(), userRequest.getToTables(),
+//                userRequest.isOverwrite(), ng.getNonceGroup(), ng.newNonce());
+//            return master.restoreTables(null, request);
+//          } catch (IOException ioe) {
+//            throw new ServiceException(ioe);
+//          }
+//        }
+//      });
+//    return new TableRestoreFuture(this, TableName.BACKUP_TABLE_NAME, response);
+//  }
+//
+//  public void restoreTables(final RestoreRequest userRequest) throws IOException {
+//    get(restoreTablesAsync(userRequest),
+//        restoreWaitTimeout, TimeUnit.SECONDS);
+//  }
+//
+//  private static class TableRestoreFuture extends TableFuture<Void> {
+//    public TableRestoreFuture(final HBaseAdmin admin, final TableName tableName,
+//        final RestoreTablesResponse response) {
+//      super(admin, tableName,
+//          (response != null) ? response.getProcId() : null);
+//    }
+//
+//    @Override
+//    public String getOperationType() {
+//      return "RESTORE";
+//    }
+//  }
 
   @Override
   public Future<Void> modifyTable(final TableName tableName, final HTableDescriptor htd)
@@ -3544,9 +3531,5 @@ public class HBaseAdmin implements Admin {
             HConstants.EMPTY_END_ROW, false, 0);
   }
 
-  @Override
-  public BackupAdmin getBackupAdmin() throws IOException {
-    return new HBaseBackupAdmin(this);
-  }
 
 }
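
The commented-out backupTables/restoreTables bodies above keep HBaseAdmin's usual synchronous-over-asynchronous shape: submit the operation, hand back a Future, and let the blocking variant wait on it with an operation-level timeout, as in get(backupTablesAsync(...), backupWaitTimeout, TimeUnit.SECONDS). A self-contained sketch of that shape, with all names hypothetical:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SyncOverAsyncSketch {

  // Stand-in for backupTablesAsync: submit the work, hand the caller a Future.
  static Future<String> submitAsync(ExecutorService pool) {
    return pool.submit(new Callable<String>() {
      @Override
      public String call() throws Exception {
        TimeUnit.MILLISECONDS.sleep(100); // placeholder for the master-side procedure
        return "backup_1396650096738";    // the procedure's result, e.g. a backup id
      }
    });
  }

  // Stand-in for the blocking backupTables: wait on the Future with a timeout.
  static String submit(ExecutorService pool, long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    return submitAsync(pool).get(timeout, unit);
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    try {
      System.out.println(submit(pool, 10, TimeUnit.SECONDS));
    } finally {
      pool.shutdown();
    }
  }
}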