Posted to commits@hbase.apache.org by mb...@apache.org on 2013/03/04 12:24:53 UTC

svn commit: r1452257 [10/14] - in /hbase/branches/0.94: security/src/main/java/org/apache/hadoop/hbase/security/access/ security/src/test/java/org/apache/hadoop/hbase/security/access/ src/main/jamon/org/apache/hadoop/hbase/tmpl/master/ src/main/java/or...

Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,194 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+
+/**
+ * Utility methods for iterating over the file system layout under the hbase root directory.
+ */
+@InterfaceAudience.Private
+public final class FSVisitor {
+  private static final Log LOG = LogFactory.getLog(FSVisitor.class);
+
+  public interface StoreFileVisitor {
+    void storeFile(final String region, final String family, final String hfileName)
+       throws IOException;
+  }
+
+  public interface RecoveredEditsVisitor {
+    void recoveredEdits(final String region, final String logfile)
+      throws IOException;
+  }
+
+  public interface LogFileVisitor {
+    void logFile(final String server, final String logfile)
+      throws IOException;
+  }
+
+  private FSVisitor() {
+    // private constructor for utility class
+  }
+
+  /**
+   * Iterate over the table store files
+   *
+   * @param fs {@link FileSystem}
+   * @param tableDir {@link Path} to the table directory
+   * @param visitor callback object to get the store files
+   * @throws IOException if an error occurred while scanning the directory
+   */
+  public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir,
+      final StoreFileVisitor visitor) throws IOException {
+    FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+    if (regions == null) {
+      LOG.info("No regions under directory:" + tableDir);
+      return;
+    }
+
+    for (FileStatus region: regions) {
+      visitRegionStoreFiles(fs, region.getPath(), visitor);
+    }
+  }
+
+  /**
+   * Iterate over the region store files
+   *
+   * @param fs {@link FileSystem}
+   * @param regionDir {@link Path} to the region directory
+   * @param visitor callback object to get the store files
+   * @throws IOException if an error occurred while scanning the directory
+   */
+  public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir,
+      final StoreFileVisitor visitor) throws IOException {
+    FileStatus[] families = FSUtils.listStatus(fs, regionDir, new FSUtils.FamilyDirFilter(fs));
+    if (families == null) {
+      LOG.info("No families under region directory:" + regionDir);
+      return;
+    }
+
+    PathFilter fileFilter = new FSUtils.FileFilter(fs);
+    for (FileStatus family: families) {
+      Path familyDir = family.getPath();
+      String familyName = familyDir.getName();
+
+      // get all the storeFiles in the family
+      FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir, fileFilter);
+      if (storeFiles == null) {
+        LOG.debug("No hfiles found for family: " + familyDir + ", skipping.");
+        continue;
+      }
+
+      for (FileStatus hfile: storeFiles) {
+        Path hfilePath = hfile.getPath();
+        visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName());
+      }
+    }
+  }
+
+  /**
+   * Iterate over each region in the table and notify the visitor about its recovered.edits files
+   *
+   * @param fs {@link FileSystem}
+   * @param tableDir {@link Path} to the table directory
+   * @param visitor callback object to get the recovered.edits files
+   * @throws IOException if an error occurred while scanning the directory
+   */
+  public static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir,
+      final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
+    FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
+    if (regions == null) {
+      LOG.info("No regions under directory:" + tableDir);
+      return;
+    }
+
+    for (FileStatus region: regions) {
+      visitRegionRecoveredEdits(fs, region.getPath(), visitor);
+    }
+  }
+
+  /**
+   * Iterate over recovered.edits of the specified region
+   *
+   * @param fs {@link FileSystem}
+   * @param regionDir {@link Path} to the Region directory
+   * @param visitor callback object to get the recovered.edits files
+   * @throws IOException if an error occurred while scanning the directory
+   */
+  public static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir,
+      final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
+    NavigableSet<Path> files = HLog.getSplitEditFilesSorted(fs, regionDir);
+    if (files == null || files.size() == 0) return;
+
+    for (Path source: files) {
+      // check to see if the file is zero length, in which case we can skip it
+      FileStatus stat = fs.getFileStatus(source);
+      if (stat.getLen() <= 0) continue;
+
+      visitor.recoveredEdits(regionDir.getName(), source.getName());
+    }
+  }
+
+  /**
+   * Iterate over hbase log files
+   *
+   * @param fs {@link FileSystem}
+   * @param rootDir {@link Path} to the HBase root folder
+   * @param visitor callback object to get the log files
+   * @throws IOException if an error occurred while scanning the directory
+   */
+  public static void visitLogFiles(final FileSystem fs, final Path rootDir,
+      final LogFileVisitor visitor) throws IOException {
+    Path logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
+    FileStatus[] logServerDirs = FSUtils.listStatus(fs, logsDir);
+    if (logServerDirs == null) {
+      LOG.info("No logs under directory:" + logsDir);
+      return;
+    }
+
+    for (FileStatus serverLogs: logServerDirs) {
+      String serverName = serverLogs.getPath().getName();
+
+      FileStatus[] hlogs = FSUtils.listStatus(fs, serverLogs.getPath());
+      if (hlogs == null) {
+        LOG.debug("No hfiles found for server: " + serverName + ", skipping.");
+        continue;
+      }
+
+      for (FileStatus hlogRef: hlogs) {
+        visitor.logFile(serverName, hlogRef.getPath().getName());
+      }
+    }
+  }
+}
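
For context, a minimal sketch of how a caller might drive the new visitor
(assuming an HBase 0.94 classpath; the table path and visitor body below are
hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.FSVisitor;

    public class FSVisitorExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tableDir = new Path("/hbase/someTable");  // hypothetical table dir
        // Print every store file as region/family/hfile
        FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
          public void storeFile(String region, String family, String hfileName) {
            System.out.println(region + "/" + family + "/" + hfileName);
          }
        });
      }
    }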

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Mon Mar  4 11:24:50 2013
@@ -41,6 +41,21 @@ public class HFileArchiveUtil {
   /**
    * Get the directory to archive a store directory
    * @param conf {@link Configuration} to read for the archive directory name
+   * @param tableName table name under which the store currently lives
+   * @param regionName encoded name of the region under which the store currently lives
+   * @param familyName name of the family in the store
+   * @return {@link Path} to the directory to archive the given store or
+   *         <tt>null</tt> if it should not be archived
+   * @throws IOException if an unexpected error occurs
+   */
+  public static Path getStoreArchivePath(final Configuration conf, final String tableName,
+      final String regionName, final String familyName) throws IOException {
+    Path tableArchiveDir = getTableArchivePath(conf, tableName);
+    return Store.getStoreHomedir(tableArchiveDir, regionName, familyName);
+  }
+
+  /**
+   * Get the directory to archive a store directory
+   * @param conf {@link Configuration} to read for the archive directory name
    * @param region parent region information under which the store currently
    *          lives
    * @param family name of the family in the store
@@ -85,6 +100,24 @@ public class HFileArchiveUtil {
   }
 
   /**
+   * Get the archive directory for a given region under the specified table
+   * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+   *          the archive path)
+   * @param tabledir the original table directory. Cannot be null.
+   * @param regiondir the path to the region directory. Cannot be null.
+   * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
+   *         should not be archived
+   */
+  public static Path getRegionArchiveDir(Path rootdir, Path tabledir, Path regiondir) {
+    // get the archive directory for a table
+    Path archiveDir = getTableArchivePath(rootdir, tabledir.getName());
+
+    // then add on the region path under the archive
+    String encodedRegionName = regiondir.getName();
+    return HRegion.getRegionDir(archiveDir, encodedRegionName);
+  }
+
+  /**
    * Get the path to the table archive directory based on the configured archive directory.
    * <p>
    * Get the path to the table's archive directory.
@@ -95,7 +128,35 @@ public class HFileArchiveUtil {
    */
   public static Path getTableArchivePath(Path tabledir) {
     Path root = tabledir.getParent();
-    return new Path(new Path(root,HConstants.HFILE_ARCHIVE_DIRECTORY), tabledir.getName());
+    return getTableArchivePath(root, tabledir.getName());
+  }
+
+  /**
+   * Get the path to the table archive directory based on the configured archive directory.
+   * <p>
+   * Get the path to the table's archive directory.
+   * <p>
+   * Generally of the form: /hbase/.archive/[tablename]
+   * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+   *          the archive path)
+   * @param tableName Name of the table to be archived. Cannot be null.
+   * @return {@link Path} to the archive directory for the table
+   */
+  public static Path getTableArchivePath(final Path rootdir, final String tableName) {
+    return new Path(getArchivePath(rootdir), tableName);
+  }
+
+  /**
+   * Get the path to the table archive directory based on the configured archive directory.
+   * <p>
+   * It is assumed that the table should already be archived.
+   * @param conf {@link Configuration} to read the archive directory property. Can be null
+   * @param tableName Name of the table to be archived. Cannot be null.
+   * @return {@link Path} to the archive directory for the table
+   * @throws IOException if an unexpected error occurs
+   */
+  public static Path getTableArchivePath(final Configuration conf, final String tableName)
+      throws IOException {
+    return new Path(getArchivePath(conf), tableName);
   }
 
   /**
@@ -106,6 +167,16 @@ public class HFileArchiveUtil {
    * @throws IOException if an unexpected error occurs
    */
   public static Path getArchivePath(Configuration conf) throws IOException {
-    return new Path(FSUtils.getRootDir(conf), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    return getArchivePath(FSUtils.getRootDir(conf));
+  }
+
+  /**
+   * Get the full path to the archive directory on the configured {@link FileSystem}
+   * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+   *          the archive path)
+   * @return the full {@link Path} to the archive directory, as defined by the configuration
+   */
+  private static Path getArchivePath(final Path rootdir) {
+    return new Path(rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY);
   }
 }
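
As a usage sketch of the new overloads (the table, encoded region and family
names below are hypothetical):

    Configuration conf = HBaseConfiguration.create();
    // Archive dir for a whole table: <hbase.rootdir>/.archive/someTable
    Path tableArchive = HFileArchiveUtil.getTableArchivePath(conf, "someTable");
    // Archive dir for a single store: <hbase.rootdir>/.archive/someTable/<region>/cf
    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(
        conf, "someTable", "1588230740", "cf");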

Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,176 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+
+/**
+ * Utility methods for creating and assigning regions.
+ */
+@InterfaceAudience.Private
+public final class ModifyRegionUtils {
+  private static final Log LOG = LogFactory.getLog(ModifyRegionUtils.class);
+
+  private ModifyRegionUtils() {
+  }
+
+  public interface RegionFillTask {
+    void fillRegion(final HRegion region) throws IOException;
+  }
+
+  /**
+   * Create a new set of regions on the specified file system.
+   * NOTE: you should add the regions to .META. after this operation.
+   *
+   * @param conf {@link Configuration}
+   * @param rootDir Root directory for HBase instance
+   * @param hTableDescriptor description of the table
+   * @param newRegions {@link HRegionInfo} that describes the regions to create
+   * @throws IOException
+   */
+  public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
+      final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) throws IOException {
+    return createRegions(conf, rootDir, hTableDescriptor, newRegions, null);
+  }
+
+  /**
+   * Create a new set of regions on the specified file system.
+   * NOTE: you should add the regions to .META. after this operation.
+   *
+   * @param conf {@link Configuration}
+   * @param rootDir Root directory for HBase instance
+   * @param hTableDescriptor description of the table
+   * @param newRegions {@link HRegionInfo} that describes the regions to create
+   * @param task {@link RegionFillTask} custom code to populate region after creation
+   * @throws IOException
+   */
+  public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
+      final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
+      final RegionFillTask task) throws IOException {
+    if (newRegions == null) return null;
+    int regionNumber = newRegions.length;
+    ThreadPoolExecutor regionOpenAndInitThreadPool = getRegionOpenAndInitThreadPool(conf,
+        "RegionOpenAndInitThread-" + hTableDescriptor.getNameAsString(), regionNumber);
+    CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<HRegionInfo>(
+        regionOpenAndInitThreadPool);
+    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
+    for (final HRegionInfo newRegion : newRegions) {
+      completionService.submit(new Callable<HRegionInfo>() {
+        public HRegionInfo call() throws IOException {
+          // 1. Create HRegion
+          HRegion region = HRegion.createHRegion(newRegion,
+              rootDir, conf, hTableDescriptor, null,
+              false, true);
+          try {
+            // 2. Custom user code to interact with the created region
+            if (task != null) {
+              task.fillRegion(region);
+            }
+          } finally {
+            // 3. Close the new region to flush to disk. Close log file too.
+            region.close();
+          }
+          return region.getRegionInfo();
+        }
+      });
+    }
+    try {
+      // 4. wait for all regions to finish creation
+      for (int i = 0; i < regionNumber; i++) {
+        Future<HRegionInfo> future = completionService.take();
+        HRegionInfo regionInfo = future.get();
+        regionInfos.add(regionInfo);
+      }
+    } catch (InterruptedException e) {
+      LOG.error("Caught " + e + " during region creation");
+      throw new InterruptedIOException(e.getMessage());
+    } catch (ExecutionException e) {
+      throw new IOException(e);
+    } finally {
+      regionOpenAndInitThreadPool.shutdownNow();
+    }
+    return regionInfos;
+  }
+
+  /*
+   * used by createRegions() to get the thread pool executor based on the
+   * "hbase.hregion.open.and.init.threads.max" property.
+   */
+  static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf,
+      final String threadNamePrefix, int regionNumber) {
+    int maxThreads = Math.min(regionNumber, conf.getInt(
+        "hbase.hregion.open.and.init.threads.max", 10));
+    ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.getBoundedCachedThreadPool(
+        maxThreads, 30L, TimeUnit.SECONDS,
+        new ThreadFactory() {
+          private int count = 1;
+
+          public Thread newThread(Runnable r) {
+            return new Thread(r, threadNamePrefix + "-" + count++);
+          }
+        });
+    return regionOpenAndInitThreadPool;
+  }
+
+  /**
+   * Trigger immediate assignment of the regions in round-robin fashion
+   *
+   * @param assignmentManager the cluster AssignmentManager to use
+   * @param regions the regions to assign
+   * @throws IOException if the assignment is interrupted
+   */
+  public static void assignRegions(final AssignmentManager assignmentManager,
+      final List<HRegionInfo> regions) throws IOException {
+    try {
+      assignmentManager.assignUserRegionsToOnlineServers(regions);
+    } catch (InterruptedException ie) {
+      LOG.error("Caught " + ie + " during round-robin assignment");
+      throw new InterruptedIOException(ie.getMessage());
+    }
+  }
+}
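
A minimal sketch of driving createRegions() with a RegionFillTask (the table
name, family and single-region layout below are hypothetical):

    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    HTableDescriptor htd = new HTableDescriptor("demoTable");
    htd.addFamily(new HColumnDescriptor("cf"));
    // One region spanning the whole key space
    HRegionInfo[] newRegions = { new HRegionInfo(htd.getName(), null, null) };
    List<HRegionInfo> created = ModifyRegionUtils.createRegions(conf, rootDir, htd,
        newRegions, new ModifyRegionUtils.RegionFillTask() {
          public void fillRegion(HRegion region) throws IOException {
            // optionally pre-populate the region here, before it is closed
          }
        });
    // NOTE: the returned regions still have to be added to .META.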

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Mon Mar  4 11:24:50 2013
@@ -61,9 +61,9 @@ import org.apache.zookeeper.Op;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Stat;
-import org.apache.zookeeper.client.ZooKeeperSaslClient;
 import org.apache.zookeeper.server.ZooKeeperSaslServer;
 import org.apache.zookeeper.proto.CreateRequest;
 import org.apache.zookeeper.proto.DeleteRequest;
@@ -855,6 +855,10 @@ public class ZKUtil {
   /**
    * Set data into node creating node if it doesn't yet exist.
    * Does not set watch.
+   *
+   * WARNING: this is not atomic -- it is possible to get a 0-byte data value in the znode before
+   * data is written
+   *
    * @param zkw zk reference
    * @param znode path of node
    * @param data data to set for node
@@ -1067,7 +1071,7 @@ public class ZKUtil {
   }
 
   /**
-   * Creates the specified node, if the node does not exist.  Does not set a
+   * Creates the specified node, iff the node does not exist.  Does not set a
    * watch and fails silently if the node already exists.
    *
    * The node created is persistent and open access.
@@ -1078,8 +1082,24 @@ public class ZKUtil {
    */
   public static void createAndFailSilent(ZooKeeperWatcher zkw,
       String znode) throws KeeperException {
+    createAndFailSilent(zkw, znode, new byte[0]);
+  }
+
+  /**
+   * Creates the specified node containing the specified data, iff the node does not exist.  Does
+   * not set a watch and fails silently if the node already exists.
+   *
+   * The node created is persistent and open access.
+   *
+   * @param zkw zk reference
+   * @param znode path of node
+   * @param data byte array of data to store in the znode
+   * @throws KeeperException if unexpected zookeeper exception
+   */
+  public static void createAndFailSilent(ZooKeeperWatcher zkw,
+      String znode, byte[] data) throws KeeperException {
     createAndFailSilent(zkw,
-      (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, new byte[0]));
+      (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, data));
   }
 
   private static void createAndFailSilent(ZooKeeperWatcher zkw, CreateAndFailSilent cafs)
@@ -1120,11 +1140,29 @@ public class ZKUtil {
    */
   public static void createWithParents(ZooKeeperWatcher zkw, String znode)
   throws KeeperException {
+    createWithParents(zkw, znode, new byte[0]);
+  }
+
+  /**
+   * Creates the specified node and all parent nodes required for it to exist.  The creation of
+   * parent znodes is not atomic with the leaf znode creation but the data is written atomically
+   * when the leaf node is created.
+   *
+   * No watches are set and no errors are thrown if the node already exists.
+   *
+   * The nodes created are persistent and open access.
+   *
+   * @param zkw zk reference
+   * @param znode path of node
+   * @param data data to store in the created znode
+   * @throws KeeperException if unexpected zookeeper exception
+   */
+  public static void createWithParents(ZooKeeperWatcher zkw, String znode, byte[] data)
+  throws KeeperException {
     try {
       if(znode == null) {
         return;
       }
-      zkw.getRecoverableZooKeeper().create(znode, new byte[0], createACL(zkw, znode),
+      zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode),
           CreateMode.PERSISTENT);
     } catch(KeeperException.NodeExistsException nee) {
       return;
@@ -1606,4 +1644,37 @@ public class ZKUtil {
       throw new IOException(keeperEx);
     }
   }
+
+  /**
+   * Recursively print the current state of ZK (non-transactional)
+   * @param zkw zk reference
+   * @param root name of the root directory in zk to print
+   */
+  public static void logZKTree(ZooKeeperWatcher zkw, String root) {
+    if (!LOG.isDebugEnabled()) return;
+    LOG.debug("Current zk system:");
+    String prefix = "|-";
+    LOG.debug(prefix + root);
+    try {
+      logZKTree(zkw, root, prefix);
+    } catch (KeeperException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Helper method to print the current state of the ZK tree.
+   * @see #logZKTree(ZooKeeperWatcher, String)
+   * @throws KeeperException if an unexpected exception occurs
+   */
+  protected static void logZKTree(ZooKeeperWatcher zkw, String root, String prefix)
+      throws KeeperException {
+    List<String> children = ZKUtil.listChildrenNoWatch(zkw, root);
+    if (children == null) return;
+    for (String child : children) {
+      LOG.debug(prefix + child);
+      String node = ZKUtil.joinZNode(root.equals("/") ? "" : root, child);
+      logZKTree(zkw, node, prefix + "---");
+    }
+  }
+
 }
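
A short sketch of the new data-carrying creates and the tree dump (zkw stands
for an existing ZooKeeperWatcher; the znode paths below are hypothetical):

    byte[] data = Bytes.toBytes("initial-state");
    // No-op if the znode already exists; otherwise created with the given data
    ZKUtil.createAndFailSilent(zkw, "/hbase/example", data);
    // Parents are created empty (non-atomically); data goes on the leaf only
    ZKUtil.createWithParents(zkw, "/hbase/example/child/leaf", data);
    // Dump the subtree at DEBUG level
    ZKUtil.logZKTree(zkw, "/hbase");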

Added: hbase/branches/0.94/src/main/protobuf/ErrorHandling.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/protobuf/ErrorHandling.proto?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/protobuf/ErrorHandling.proto (added)
+++ hbase/branches/0.94/src/main/protobuf/ErrorHandling.proto Mon Mar  4 11:24:50 2013
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are used for error handling
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "ErrorHandlingProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * Protobuf version of a java.lang.StackTraceElement
+ * so we can serialize exceptions.
+ */
+message StackTraceElementMessage {
+  optional string declaringClass = 1;
+  optional string methodName = 2;
+  optional string fileName = 3;
+  optional int32 lineNumber = 4;
+}
+
+/**
+ * Cause of a remote failure for a generic exception. Contains
+ * all the information for a generic exception as well as
+ * optional info about the error for generic info passing
+ * (which should be another protobuffed class).
+ */
+message GenericExceptionMessage {
+  optional string className = 1;
+  optional string message = 2;
+  optional bytes errorInfo = 3;
+  repeated StackTraceElementMessage trace = 4;
+}
+
+/**
+ * Exception sent across the wire when a remote task needs
+ * to notify other tasks that it failed and why
+ */
+message ForeignExceptionMessage {
+  optional string source = 1;
+  optional GenericExceptionMessage genericException = 2;
+}
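
To illustrate the intent, a hedged Java sketch of serializing a Throwable with
the classes protoc generates from this file (an enclosing helper class and the
ErrorHandlingProtos.* imports are omitted; builder method names assume the
standard protoc output):

    static ForeignExceptionMessage toProtobuf(String source, Throwable t) {
      GenericExceptionMessage.Builder payload = GenericExceptionMessage.newBuilder()
          .setClassName(t.getClass().getName());
      if (t.getMessage() != null) payload.setMessage(t.getMessage());
      for (StackTraceElement frame : t.getStackTrace()) {
        StackTraceElementMessage.Builder element = StackTraceElementMessage.newBuilder()
            .setDeclaringClass(frame.getClassName())
            .setMethodName(frame.getMethodName())
            .setLineNumber(frame.getLineNumber());
        if (frame.getFileName() != null) element.setFileName(frame.getFileName());
        payload.addTrace(element);
      }
      return ForeignExceptionMessage.newBuilder()
          .setSource(source)
          .setGenericException(payload)
          .build();
    }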

Added: hbase/branches/0.94/src/main/protobuf/hbase.proto
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/protobuf/hbase.proto?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/protobuf/hbase.proto (added)
+++ hbase/branches/0.94/src/main/protobuf/hbase.proto Mon Mar  4 11:24:50 2013
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are shared throughout HBase
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "HBaseProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * Description of the snapshot to take
+ */
+message SnapshotDescription {
+  required string name = 1;
+  optional string table = 2; // not needed for delete, but checked for in taking snapshot
+  optional int64 creationTime = 3 [default = 0];
+  enum Type {
+    DISABLED = 0;
+    FLUSH = 1;
+  }
+  optional Type type = 4 [default = FLUSH];
+  optional int32 version = 5;
+}
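
A hedged sketch of building the generated message in Java (accessor names
assume standard protoc output; the names below are hypothetical):

    SnapshotDescription snapshot = SnapshotDescription.newBuilder()
        .setName("snaptb0")                          // required
        .setTable("testtb")                          // checked when taking a snapshot
        .setCreationTime(System.currentTimeMillis())
        .setType(SnapshotDescription.Type.FLUSH)     // the default
        .build();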

Modified: hbase/branches/0.94/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/hbase/admin.rb?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/ruby/hbase/admin.rb (original)
+++ hbase/branches/0.94/src/main/ruby/hbase/admin.rb Mon Mar  4 11:24:50 2013
@@ -586,5 +586,35 @@ module Hbase
       put.add(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY, org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER, org.apache.hadoop.hbase.util.Writables.getBytes(hri))
       meta.put(put)
     end
+
+    #----------------------------------------------------------------------------------------------
+    # Take a snapshot of the specified table
+    def snapshot(table, snapshot_name)
+      @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Restore the specified snapshot
+    def restore_snapshot(snapshot_name)
+      @admin.restoreSnapshot(snapshot_name.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Create a new table by cloning the snapshot content
+    def clone_snapshot(snapshot_name, table)
+      @admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Delete the specified snapshot
+    def delete_snapshot(snapshot_name)
+      @admin.deleteSnapshot(snapshot_name.to_java_bytes)
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of snapshots
+    def list_snapshot
+      @admin.listSnapshots
+    end
   end
 end
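
The Ruby wrappers above delegate directly to HBaseAdmin; the equivalent Java
calls look roughly like this (table and snapshot names are hypothetical):

    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.snapshot(Bytes.toBytes("snap1"), Bytes.toBytes("someTable"));
    admin.cloneSnapshot(Bytes.toBytes("snap1"), Bytes.toBytes("someTableClone"));
    admin.disableTable("someTable");
    admin.restoreSnapshot(Bytes.toBytes("snap1"));
    admin.enableTable("someTable");
    admin.deleteSnapshot(Bytes.toBytes("snap1"));
    List<SnapshotDescription> snapshots = admin.listSnapshots();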

Modified: hbase/branches/0.94/src/main/ruby/shell.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/shell.rb?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/ruby/shell.rb (original)
+++ hbase/branches/0.94/src/main/ruby/shell.rb Mon Mar  4 11:24:50 2013
@@ -290,6 +290,18 @@ Shell.load_command_group(
 )
 
 Shell.load_command_group(
+  'snapshot',
+  :full_name => 'CLUSTER SNAPSHOT TOOLS',
+  :commands => %w[
+    snapshot
+    clone_snapshot
+    restore_snapshot
+    delete_snapshot
+    list_snapshots
+  ]
+)
+
+Shell.load_command_group(
   'security',
   :full_name => 'SECURITY TOOLS',
   :comment => "NOTE: Above commands are only applicable if running with the AccessController coprocessor",

Added: hbase/branches/0.94/src/main/ruby/shell/commands/clone_snapshot.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/shell/commands/clone_snapshot.rb?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/ruby/shell/commands/clone_snapshot.rb (added)
+++ hbase/branches/0.94/src/main/ruby/shell/commands/clone_snapshot.rb Mon Mar  4 11:24:50 2013
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class CloneSnapshot < Command
+      def help
+        return <<-EOF
+Create a new table by cloning the snapshot content.
+No data copies are involved, and writes to the newly created table
+will not affect the snapshot data.
+
+Examples:
+  hbase> clone_snapshot 'snapshotName', 'tableName'
+EOF
+      end
+
+      def command(snapshot_name, table)
+        format_simple_command do
+          admin.clone_snapshot(snapshot_name, table)
+        end
+      end
+    end
+  end
+end

Added: hbase/branches/0.94/src/main/ruby/shell/commands/delete_snapshot.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/shell/commands/delete_snapshot.rb?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/ruby/shell/commands/delete_snapshot.rb (added)
+++ hbase/branches/0.94/src/main/ruby/shell/commands/delete_snapshot.rb Mon Mar  4 11:24:50 2013
@@ -0,0 +1,37 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class DeleteSnapshot < Command
+      def help
+        return <<-EOF
+Delete a specified snapshot. Examples:
+
+  hbase> delete_snapshot 'snapshotName'
+EOF
+      end
+
+      def command(snapshot_name)
+        format_simple_command do
+          admin.delete_snapshot(snapshot_name)
+        end
+      end
+    end
+  end
+end

Added: hbase/branches/0.94/src/main/ruby/shell/commands/list_snapshots.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/shell/commands/list_snapshots.rb?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/ruby/shell/commands/list_snapshots.rb (added)
+++ hbase/branches/0.94/src/main/ruby/shell/commands/list_snapshots.rb Mon Mar  4 11:24:50 2013
@@ -0,0 +1,52 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'time'
+
+module Shell
+  module Commands
+    class ListSnapshots < Command
+      def help
+        return <<-EOF
+List all snapshots taken (by printing their names and related information).
+An optional regular expression parameter can be used to filter the output
+by snapshot name.
+
+Examples:
+  hbase> list_snapshots
+  hbase> list_snapshots 'abc.*'
+EOF
+      end
+
+      def command(regex = ".*")
+        now = Time.now
+        formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
+
+        regex = /#{regex}/ unless regex.is_a?(Regexp)
+        list = admin.list_snapshot.select {|s| regex.match(s.getName)}
+        list.each do |snapshot|
+          creation_time = Time.at(snapshot.getCreationTime() / 1000).to_s
+          formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
+        end
+
+        formatter.footer(now, list.size)
+        return list.map { |s| s.getName() }
+      end
+    end
+  end
+end

Added: hbase/branches/0.94/src/main/ruby/shell/commands/restore_snapshot.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/shell/commands/restore_snapshot.rb?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/ruby/shell/commands/restore_snapshot.rb (added)
+++ hbase/branches/0.94/src/main/ruby/shell/commands/restore_snapshot.rb Mon Mar  4 11:24:50 2013
@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class RestoreSnapshot < Command
+      def help
+        return <<-EOF
+Restore a specified snapshot.
+The restore operation replaces the content of the original table,
+bringing it back to the state at the time the snapshot was taken.
+The table must be disabled.
+
+Examples:
+  hbase> restore_snapshot 'snapshotName'
+EOF
+      end
+
+      def command(snapshot_name)
+        format_simple_command do
+          admin.restore_snapshot(snapshot_name)
+        end
+      end
+    end
+  end
+end

Added: hbase/branches/0.94/src/main/ruby/shell/commands/snapshot.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/shell/commands/snapshot.rb?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/ruby/shell/commands/snapshot.rb (added)
+++ hbase/branches/0.94/src/main/ruby/shell/commands/snapshot.rb Mon Mar  4 11:24:50 2013
@@ -0,0 +1,37 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class Snapshot < Command
+      def help
+        return <<-EOF
+Take a snapshot of the specified table. Examples:
+
+  hbase> snapshot 'sourceTable', 'snapshotName'
+EOF
+      end
+
+      def command(table, snapshot_name)
+        format_simple_command do
+          admin.snapshot(table, snapshot_name)
+        end
+      end
+    end
+  end
+end

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Mon Mar  4 11:24:50 2013
@@ -1089,6 +1089,20 @@ public class HBaseTestingUtility {
     return count;
   }
 
+  /**
+   * Return the number of rows in the given table, counting only the specified families.
+   */
+  public int countRows(final HTable table, final byte[]... families) throws IOException {
+    Scan scan = new Scan();
+    for (byte[] family: families) {
+      scan.addFamily(family);
+    }
+    ResultScanner results = table.getScanner(scan);
+    int count = 0;
+    for (@SuppressWarnings("unused") Result res : results) {
+      count++;
+    }
+    results.close();
+    return count;
+  }
+
   /**
    * Return an md5 digest of the entire contents of a table.
    */

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java Mon Mar  4 11:24:50 2013
@@ -20,9 +20,15 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+ 
+import java.util.regex.Pattern;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -31,6 +37,7 @@ import org.junit.experimental.categories
  */
 @Category(SmallTests.class)
 public class TestHTableDescriptor {
+  final static Log LOG = LogFactory.getLog(TestHTableDescriptor.class);
 
   /**
    * Test cps in the table description
@@ -122,4 +129,42 @@ public class TestHTableDescriptor {
     desc.setMemStoreFlushSize(1111L);
     assertEquals(1111L, desc.getMemStoreFlushSize());
   }
+
+  String[] legalTableNames = { "foo", "with-dash_under.dot", "_under_start_ok" };
+  String[] illegalTableNames = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok" };
+
+  @Test
+  public void testLegalHTableNames() {
+    for (String tn : legalTableNames) {
+      HTableDescriptor.isLegalTableName(Bytes.toBytes(tn));
+    }
+  }
+
+  @Test
+  public void testIllegalHTableNames() {
+    for (String tn : illegalTableNames) {
+      try {
+        HTableDescriptor.isLegalTableName(Bytes.toBytes(tn));
+        fail("invalid tablename " + tn + " should have failed");
+      } catch (Exception e) {
+        // expected
+      }
+    }
+  }
+
+  @Test
+  public void testLegalHTableNamesRegex() {
+    for (String tn : legalTableNames) {
+      LOG.info("Testing: '" + tn + "'");
+      assertTrue(Pattern.matches(HTableDescriptor.VALID_USER_TABLE_REGEX, tn));
+    }
+  }
+
+  @Test
+  public void testIllegalHTableNamesRegex() {
+    for (String tn : illegalTableNames) {
+      LOG.info("Testing: '" + tn + "'");
+      assertFalse(Pattern.matches(HTableDescriptor.VALID_USER_TABLE_REGEX, tn));
+    }
+  }
 }

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java Mon Mar  4 11:24:50 2013
@@ -360,7 +360,7 @@ public class TestHFileArchiving {
 
         try {
           // Try to archive the file
-          HFileArchiver.archiveRegion(conf, fs, rootDir,
+          HFileArchiver.archiveRegion(fs, rootDir,
               sourceRegionDir.getParent(), sourceRegionDir);
 
           // The archiver succeeded, the file is no longer in the original location

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,391 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.MD5Hash;
+import org.junit.*;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test clone/restore snapshots from the client
+ */
+@Category(LargeTests.class)
+public class TestRestoreSnapshotFromClient {
+  final Log LOG = LogFactory.getLog(getClass());
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private final byte[] FAMILY = Bytes.toBytes("cf");
+
+  private byte[] emptySnapshot;
+  private byte[] snapshotName0;
+  private byte[] snapshotName1;
+  private byte[] snapshotName2;
+  private int snapshot0Rows;
+  private int snapshot1Rows;
+  private byte[] tableName;
+  private HBaseAdmin admin;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
+    TEST_UTIL.getConfiguration().setBoolean(
+        "hbase.master.enabletable.roundrobin", true);
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Initialize the tests with a table filled with some data
+   * and two snapshots (snapshotName0, snapshotName1) of different states.
+   * The tableName, snapshotNames and the number of rows in the snapshot are initialized.
+   */
+  @Before
+  public void setup() throws Exception {
+    this.admin = TEST_UTIL.getHBaseAdmin();
+
+    long tid = System.currentTimeMillis();
+    tableName = Bytes.toBytes("testtb-" + tid);
+    emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
+    snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+    snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+    snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
+
+    // create Table and disable it
+    createTable(tableName, FAMILY);
+    admin.disableTable(tableName);
+
+    // take an empty snapshot
+    admin.snapshot(emptySnapshot, tableName);
+
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    try {
+      // enable table and insert data
+      admin.enableTable(tableName);
+      loadData(table, 500, FAMILY);
+      snapshot0Rows = TEST_UTIL.countRows(table);
+      admin.disableTable(tableName);
+
+      // take a snapshot
+      admin.snapshot(snapshotName0, tableName);
+
+      // enable table and insert more data
+      admin.enableTable(tableName);
+      loadData(table, 500, FAMILY);
+      snapshot1Rows = TEST_UTIL.countRows(table);
+      admin.disableTable(tableName);
+
+      // take a snapshot of the updated table
+      admin.snapshot(snapshotName1, tableName);
+
+      // re-enable table
+      admin.enableTable(tableName);
+    } finally {
+      table.close();
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (admin.tableExists(tableName)) {
+      TEST_UTIL.deleteTable(tableName);
+    }
+    admin.deleteSnapshot(snapshotName0);
+    admin.deleteSnapshot(snapshotName1);
+
+    // Ensure the archive directory is empty
+    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    mfs.getFileSystem().delete(
+      new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
+  }
+
+  @Test
+  public void testRestoreSnapshot() throws IOException {
+    verifyRowCount(tableName, snapshot1Rows);
+
+    // Restore from snapshot-0
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName0);
+    admin.enableTable(tableName);
+    verifyRowCount(tableName, snapshot0Rows);
+
+    // Restore from emptySnapshot
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(emptySnapshot);
+    admin.enableTable(tableName);
+    verifyRowCount(tableName, 0);
+
+    // Restore from snapshot-1
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName1);
+    admin.enableTable(tableName);
+    verifyRowCount(tableName, snapshot1Rows);
+  }
+
+  @Test
+  public void testRestoreSchemaChange() throws IOException {
+    byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
+
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+
+    // Add one column family and put some data in it
+    admin.disableTable(tableName);
+    admin.addColumn(tableName, new HColumnDescriptor(TEST_FAMILY2));
+    admin.enableTable(tableName);
+    assertEquals(2, table.getTableDescriptor().getFamilies().size());
+    HTableDescriptor htd = admin.getTableDescriptor(tableName);
+    assertEquals(2, htd.getFamilies().size());
+    loadData(table, 500, TEST_FAMILY2);
+    long snapshot2Rows = snapshot1Rows + 500;
+    assertEquals(snapshot2Rows, TEST_UTIL.countRows(table));
+    assertEquals(500, TEST_UTIL.countRows(table, TEST_FAMILY2));
+    Set<String> fsFamilies = getFamiliesFromFS(tableName);
+    assertEquals(2, fsFamilies.size());
+    table.close();
+
+    // Take a snapshot
+    admin.disableTable(tableName);
+    admin.snapshot(snapshotName2, tableName);
+
+    // Restore the snapshot (without the cf)
+    admin.restoreSnapshot(snapshotName0);
+    assertEquals(1, table.getTableDescriptor().getFamilies().size());
+    admin.enableTable(tableName);
+    try {
+      TEST_UTIL.countRows(table, TEST_FAMILY2);
+      fail("family '" + Bytes.toString(TEST_FAMILY2) + "' should not exists");
+    } catch (NoSuchColumnFamilyException e) {
+      // expected
+    }
+    assertEquals(snapshot0Rows, TEST_UTIL.countRows(table));
+    htd = admin.getTableDescriptor(tableName);
+    assertEquals(1, htd.getFamilies().size());
+    fsFamilies = getFamiliesFromFS(tableName);
+    assertEquals(1, fsFamilies.size());
+    table.close();
+
+    // Restore back the snapshot (with the cf)
+    admin.disableTable(tableName);
+    admin.restoreSnapshot(snapshotName2);
+    admin.enableTable(tableName);
+    htd = admin.getTableDescriptor(tableName);
+    assertEquals(2, htd.getFamilies().size());
+    assertEquals(2, table.getTableDescriptor().getFamilies().size());
+    assertEquals(500, TEST_UTIL.countRows(table, TEST_FAMILY2));
+    assertEquals(snapshot2Rows, TEST_UTIL.countRows(table));
+    fsFamilies = getFamiliesFromFS(tableName);
+    assertEquals(2, fsFamilies.size());
+    table.close();
+  }
+
+  @Test(expected=SnapshotDoesNotExistException.class)
+  public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
+    String snapshotName = "random-snapshot-" + System.currentTimeMillis();
+    String tableName = "random-table-" + System.currentTimeMillis();
+    admin.cloneSnapshot(snapshotName, tableName);
+  }
+
+  @Test
+  public void testCloneSnapshot() throws IOException, InterruptedException {
+    byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
+    testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
+    testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
+    testCloneSnapshot(clonedTableName, emptySnapshot, 0);
+  }
+
+  private void testCloneSnapshot(final byte[] tableName, final byte[] snapshotName,
+      int snapshotRows) throws IOException, InterruptedException {
+    // create a new table from snapshot
+    admin.cloneSnapshot(snapshotName, tableName);
+    verifyRowCount(tableName, snapshotRows);
+
+    admin.disableTable(tableName);
+    admin.deleteTable(tableName);
+  }
+
+  @Test
+  public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
+    byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
+    admin.cloneSnapshot(snapshotName0, clonedTableName);
+    verifyRowCount(clonedTableName, snapshot0Rows);
+    admin.disableTable(clonedTableName);
+    admin.snapshot(snapshotName2, clonedTableName);
+    admin.deleteTable(clonedTableName);
+    waitCleanerRun();
+
+    admin.cloneSnapshot(snapshotName2, clonedTableName);
+    verifyRowCount(clonedTableName, snapshot0Rows);
+    admin.disableTable(clonedTableName);
+    admin.deleteTable(clonedTableName);
+  }
+
+  /**
+   * Verify that tables created from the snapshot are still alive after source table deletion.
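+   * Clones reference the source hfiles through file links, so they should remain
+   * readable after the origin table is deleted and the cleaner chore runs.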
+   */
+  @Test
+  public void testCloneLinksAfterDelete() throws IOException, InterruptedException {
+    // Clone a table from the first snapshot
+    byte[] clonedTableName = Bytes.toBytes("clonedtb1-" + System.currentTimeMillis());
+    admin.cloneSnapshot(snapshotName0, clonedTableName);
+    verifyRowCount(clonedTableName, snapshot0Rows);
+
+    // Take a snapshot of this cloned table.
+    admin.disableTable(clonedTableName);
+    admin.snapshot(snapshotName2, clonedTableName);
+
+    // Clone the snapshot of the cloned table
+    byte[] clonedTableName2 = Bytes.toBytes("clonedtb2-" + System.currentTimeMillis());
+    admin.cloneSnapshot(snapshotName2, clonedTableName2);
+    verifyRowCount(clonedTableName2, snapshot0Rows);
+    admin.disableTable(clonedTableName2);
+
+    // Remove the original table
+    admin.disableTable(tableName);
+    admin.deleteTable(tableName);
+    waitCleanerRun();
+
+    // Verify the first cloned table
+    admin.enableTable(clonedTableName);
+    verifyRowCount(clonedTableName, snapshot0Rows);
+
+    // Verify the second cloned table
+    admin.enableTable(clonedTableName2);
+    verifyRowCount(clonedTableName2, snapshot0Rows);
+    admin.disableTable(clonedTableName2);
+
+    // Delete the first cloned table
+    admin.disableTable(clonedTableName);
+    admin.deleteTable(clonedTableName);
+    waitCleanerRun();
+
+    // Verify the second cloned table
+    admin.enableTable(clonedTableName2);
+    verifyRowCount(clonedTableName2, snapshot0Rows);
+
+    // Clone a new table from cloned
+    byte[] clonedTableName3 = Bytes.toBytes("clonedtb3-" + System.currentTimeMillis());
+    admin.cloneSnapshot(snapshotName2, clonedTableName3);
+    verifyRowCount(clonedTableName3, snapshot0Rows);
+
+    // Delete the cloned tables
+    admin.disableTable(clonedTableName2);
+    admin.deleteTable(clonedTableName2);
+    admin.disableTable(clonedTableName3);
+    admin.deleteTable(clonedTableName3);
+    admin.deleteSnapshot(snapshotName2);
+  }
+
+  // ==========================================================================
+  //  Helpers
+  // ==========================================================================
+  private void createTable(final byte[] tableName, final byte[]... families) throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for (byte[] family: families) {
+      HColumnDescriptor hcd = new HColumnDescriptor(family);
+      htd.addFamily(hcd);
+    }
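+    // pre-split on single hex characters so the md5-keyed rows written by
+    // loadData() spread across all regions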
+    byte[][] splitKeys = new byte[16][];
+    byte[] hex = Bytes.toBytes("0123456789abcdef");
+    for (int i = 0; i < 16; ++i) {
+      splitKeys[i] = new byte[] { hex[i] };
+    }
+    admin.createTable(htd, splitKeys);
+  }
+
+  public void loadData(final HTable table, int rows, byte[]... families) throws IOException {
+    byte[] qualifier = Bytes.toBytes("q");
+    table.setAutoFlush(false);
+    while (rows-- > 0) {
+      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
+      byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
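+      // md5-hex keys begin with 0-f, matching the split points used in createTable()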
+      Put put = new Put(key);
+      put.setWriteToWAL(false);
+      for (byte[] family: families) {
+        put.add(family, qualifier, value);
+      }
+      table.put(put);
+    }
+    table.flushCommits();
+  }
+
+  private void waitCleanerRun() throws InterruptedException {
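+    // run a single, synchronous pass of the hfile cleaner chore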
+    TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
+  }
+
+  private Set<String> getFamiliesFromFS(final byte[] tableName) throws IOException {
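+    // walk <root>/<table>/<region> on the master filesystem and collect the family
+    // directory names; a restore should add or remove these directories on disk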
+    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    Set<String> families = new HashSet<String>();
+    Path tableDir = HTableDescriptor.getTableDir(mfs.getRootDir(), tableName);
+    for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) {
+      for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) {
+        families.add(familyDir.getName());
+      }
+    }
+    return families;
+  }
+
+  private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    assertEquals(expectedRows, TEST_UTIL.countRows(table));
+    table.close();
+  }
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.ipc.HMasterInterface;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+
+/**
+ * Test snapshot logic from the client
+ */
+@Category(SmallTests.class)
+public class TestSnapshotFromAdmin {
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotFromAdmin.class);
+
+  /**
+   * Test that the 'correct' back-off logic (exponential increase, capped by the
+   * max wait time passed back from the server) yields the expected overall wait
+   * for the snapshot to finish.
+   * @throws Exception
+   */
+  @Test(timeout = 10000)
+  public void testBackoffLogic() throws Exception {
+    final int maxWaitTime = 7500;
+    final int numRetries = 10;
+    final int pauseTime = 500;
+    // calculate the wait time, if we just do straight backoff (ignoring the expected time from
+    // master)
+    long ignoreExpectedTime = 0;
+    for (int i = 0; i < 6; i++) {
+      ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime;
+    }
+    // the correct wait time, capping at the maxTime/tries + fudge room
+    final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300;
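+    // (roughly: a few early sleeps at pauseTime before the backoff exceeds the
+    // per-retry cap of maxWaitTime/numRetries, then capped sleeps, plus slack)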
+    assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time "
+        + "- further testing won't prove anything.", time < ignoreExpectedTime);
+
+    // setup the mocks
+    HConnectionManager.HConnectionImplementation mockConnection = Mockito
+        .mock(HConnectionManager.HConnectionImplementation.class);
+    Configuration conf = HBaseConfiguration.create();
+    // setup the conf to match the expected properties
+    conf.setInt("hbase.client.retries.number", numRetries);
+    conf.setLong("hbase.client.pause", pauseTime);
+    // mock the master admin to our mock
+    HMasterInterface mockMaster = Mockito.mock(HMasterInterface.class);
+    Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+    Mockito.when(mockConnection.getMaster()).thenReturn(mockMaster);
+    // set the max wait time for the snapshot to complete
+    Mockito.when(mockMaster.snapshot(Mockito.any(HSnapshotDescription.class)))
+        .thenReturn((long) maxWaitTime);
+
+    // first five times, we return false, last we get success
+    Mockito.when(mockMaster.isSnapshotDone(Mockito.any(HSnapshotDescription.class)))
+        .thenReturn(false, false, false, false, false, true);
+
+    // setup the admin and run the test
+    HBaseAdmin admin = new HBaseAdmin(mockConnection);
+    String snapshot = "snapshot";
+    String table = "table";
+    // get start time
+    long start = System.currentTimeMillis();
+    admin.snapshot(snapshot, table);
+    long finish = System.currentTimeMillis();
+    long elapsed = (finish - start);
+    assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
+    admin.close();
+  }
+
+  /**
+   * Make sure that we validate the snapshot name and the table name before we pass anything across
+   * the wire
+   * @throws Exception on failure
+   */
+  @Test
+  public void testValidateSnapshotName() throws Exception {
+    HConnectionManager.HConnectionImplementation mockConnection = Mockito
+        .mock(HConnectionManager.HConnectionImplementation.class);
+    Configuration conf = HBaseConfiguration.create();
+    Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+    HBaseAdmin admin = new HBaseAdmin(mockConnection);
+    SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+    // check that invalid snapshot names fail
+    failSnapshotStart(admin, builder.setName(".snapshot").build());
+    failSnapshotStart(admin, builder.setName("-snapshot").build());
+    failSnapshotStart(admin, builder.setName("snapshot fails").build());
+    failSnapshotStart(admin, builder.setName("snap$hot").build());
+    // check that the table name also gets verified
+    failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("table fails").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build());
+
+    // mock the master connection
+    HMasterInterface master = Mockito.mock(HMasterInterface.class);
+    Mockito.when(mockConnection.getMaster()).thenReturn(master);
+
+    Mockito.when(master.snapshot(Mockito.any(HSnapshotDescription.class)))
+        .thenReturn((long) 0);
+    Mockito.when(master.isSnapshotDone(Mockito.any(HSnapshotDescription.class)))
+        .thenReturn(true);
+
+    // make sure that we can use valid names
+    admin.snapshot(builder.setName("snapshot").setTable("table").build());
+  }
+
+  private void failSnapshotStart(HBaseAdmin admin, SnapshotDescription snapshot) throws IOException {
+    try {
+      admin.snapshot(snapshot);
+      fail("Snapshot should not have succeed with name:" + snapshot.getName());
+    } catch (IllegalArgumentException e) {
+      LOG.debug("Correctly failed to start snapshot:" + e.getMessage());
+    }
+  }
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test create/using/deleting snapshots from the client
+ * <p>
+ * This is an end-to-end test for the snapshot utility
+ */
+@Category(LargeTests.class)
+public class TestSnapshotFromClient {
+  private static final Log LOG = LogFactory.getLog(TestSnapshotFromClient.class);
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static final int NUM_RS = 2;
+  private static final String STRING_TABLE_NAME = "test";
+  private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
+
+  /**
+   * Setup the config for the cluster
+   * @throws Exception on failure
+   */
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(NUM_RS);
+  }
+
+  private static void setupConf(Configuration conf) {
+    // disable the ui
+    conf.setInt("hbase.regionsever.info.port", -1);
+    // lower the flush size to a small amount, regulating the number of store files
+    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+    // raise the compaction thresholds so a load triggers a compaction
+    // but still leaves some files around in the store
+    conf.setInt("hbase.hstore.compaction.min", 10);
+    conf.setInt("hbase.hstore.compactionThreshold", 10);
+    // block writes if we get to 12 store files
+    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    // drop the number of attempts for the hbase admin
+    conf.setInt("hbase.client.retries.number", 1);
+    // Enable snapshot
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    // prevent aggressive region split
+    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+      ConstantSizeRegionSplitPolicy.class.getName());
+  }
+
+  @Before
+  public void setup() throws Exception {
+    UTIL.createTable(TABLE_NAME, TEST_FAM);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    UTIL.deleteTable(TABLE_NAME);
+    // and cleanup the archive directory
+    try {
+      UTIL.getTestFileSystem().delete(new Path(UTIL.getDefaultRootDirPath(), ".archive"), true);
+    } catch (IOException e) {
+      LOG.warn("Failure to delete archive directory", e);
+    }
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  /**
+   * Test that snapshotting .META. and -ROOT- is not allowed
+   * @throws Exception on failure
+   */
+  @Test
+  public void testMetaTablesSnapshot() throws Exception {
+    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    byte[] snapshotName = Bytes.toBytes("metaSnapshot");
+
+    try {
+      admin.snapshot(snapshotName, HConstants.META_TABLE_NAME);
+      fail("taking a snapshot of .META. should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+
+    try {
+      admin.snapshot(snapshotName, HConstants.ROOT_TABLE_NAME);
+      fail("taking a snapshot of -ROOT- should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+
+  /**
+   * Test snapshotting a table that is offline
+   * @throws Exception
+   */
+  @Test
+  public void testOfflineTableSnapshot() throws Exception {
+    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    // put some stuff in the table
+    HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
+    UTIL.loadTable(table, TEST_FAM);
+
+    // get the name of all the regionservers hosting the snapshotted table
+    Set<String> snapshotServers = new HashSet<String>();
+    List<RegionServerThread> servers = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
+    for (RegionServerThread server : servers) {
+      if (server.getRegionServer().getOnlineRegions(TABLE_NAME).size() > 0) {
+        snapshotServers.add(server.getRegionServer().getServerName().toString());
+      }
+    }
+
+    LOG.debug("FS state before disable:");
+    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    // XXX if this is flaky, consider using the async version and looping, as
+    // disableTable can succeed and still timeout.
+    admin.disableTable(TABLE_NAME);
+
+    LOG.debug("FS state before snapshot:");
+    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+    // take a snapshot of the disabled table
+    byte[] snapshot = Bytes.toBytes("offlineTableSnapshot");
+    admin.snapshot(snapshot, TABLE_NAME);
+    LOG.debug("Snapshot completed.");
+
+    // make sure we have the snapshot
+    List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+      snapshot, TABLE_NAME);
+
+    // make sure it's a valid snapshot
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    LOG.debug("FS state after snapshot:");
+    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+      admin, fs, false, new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), snapshotServers);
+
+    admin.deleteSnapshot(snapshot);
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+  }
+
+  @Test
+  public void testSnapshotFailsOnNonExistentTable() throws Exception {
+    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+    String tableName = "_not_a_table";
+
+    // make sure the table doesn't exist
+    boolean fail = false;
+    do {
+      try {
+        admin.getTableDescriptor(Bytes.toBytes(tableName));
+        fail = true;
+        LOG.error("Table:" + tableName + " already exists, checking a new name");
+        tableName = tableName + "!";
+      } catch (TableNotFoundException e) {
+        fail = false;
+      }
+    } while (fail);
+
+    // snapshot the non-existent table
+    try {
+      admin.snapshot("fail", tableName);
+      fail("Snapshot succeeded even though the table does not exist.");
+    } catch (SnapshotCreationException e) {
+      LOG.info("Correctly failed to snapshot a non-existent table:" + e.getMessage());
+    }
+  }
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.ipc.HMasterInterface;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+
+/**
+ * Test snapshot logic from the client
+ */
+@Category(SmallTests.class)
+public class TestSnapshotsFromAdmin {
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotsFromAdmin.class);
+
+  /**
+   * Test that the 'correct' back-off logic (exponential increase, capped by the
+   * max wait time passed back from the server) yields the expected overall wait
+   * for the snapshot to finish.
+   * @throws Exception
+   */
+  @Test(timeout = 10000)
+  public void testBackoffLogic() throws Exception {
+    final int maxWaitTime = 7500;
+    final int numRetries = 10;
+    final int pauseTime = 500;
+    // calculate the wait time, if we just do straight backoff (ignoring the expected time from
+    // master)
+    long ignoreExpectedTime = 0;
+    for (int i = 0; i < 6; i++) {
+      ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime;
+    }
+    // the correct wait time, capping at the maxTime/tries + fudge room
+    final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300;
+    assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time "
+        + "- further testing won't prove anything.", time < ignoreExpectedTime);
+
+    // setup the mocks
+    HConnectionManager.HConnectionImplementation mockConnection = Mockito
+        .mock(HConnectionManager.HConnectionImplementation.class);
+    Configuration conf = HBaseConfiguration.create();
+    // setup the conf to match the expected properties
+    conf.setInt("hbase.client.retries.number", numRetries);
+    conf.setLong("hbase.client.pause", pauseTime);
+    // mock the master admin to our mock
+    HMasterInterface mockMaster = Mockito.mock(HMasterInterface.class);
+    Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+    Mockito.when(mockConnection.getMaster()).thenReturn(mockMaster);
+    // set the max wait time for the snapshot to complete
+    Mockito.when(mockMaster.snapshot(Mockito.any(HSnapshotDescription.class)))
+        .thenReturn((long) maxWaitTime);
+    // first five times, we return false, last we get success
+    Mockito.when(mockMaster.isSnapshotDone(Mockito.any(HSnapshotDescription.class)))
+        .thenReturn(false, false, false, false, false, true);
+
+    // setup the admin and run the test
+    HBaseAdmin admin = new HBaseAdmin(mockConnection);
+    String snapshot = "snapshot";
+    String table = "table";
+    // get start time
+    long start = System.currentTimeMillis();
+    admin.snapshot(snapshot, table);
+    long finish = System.currentTimeMillis();
+    long elapsed = (finish - start);
+    assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
+  }
+
+  /**
+   * Make sure that we validate the snapshot name and the table name before we pass anything across
+   * the wire
+   * @throws IOException on failure
+   */
+  @Test
+  public void testValidateSnapshotName() throws IOException {
+    HConnectionManager.HConnectionImplementation mockConnection = Mockito
+        .mock(HConnectionManager.HConnectionImplementation.class);
+    Configuration conf = HBaseConfiguration.create();
+    Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+    HBaseAdmin admin = new HBaseAdmin(mockConnection);
+    SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+    // check that invalid snapshot names fail
+    failSnapshotStart(admin, builder.setName(".snapshot").build());
+    failSnapshotStart(admin, builder.setName("-snapshot").build());
+    failSnapshotStart(admin, builder.setName("snapshot fails").build());
+    failSnapshotStart(admin, builder.setName("snap$hot").build());
+    // check that the table name also gets verified
+    failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("table fails").build());
+    failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build());
+  }
+
+  private void failSnapshotStart(HBaseAdmin admin, SnapshotDescription snapshot) throws IOException {
+    try {
+      admin.snapshot(snapshot);
+      fail("Snapshot should not have succeed with name:" + snapshot.getName());
+    } catch (IllegalArgumentException e) {
+      LOG.debug("Correctly failed to start snapshot:" + e.getMessage());
+    }
+  }
+}
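
For reference, a minimal sketch of the client-side snapshot workflow these tests
exercise. The table and snapshot names are hypothetical, and it assumes a running
0.94 cluster with SnapshotManager.HBASE_SNAPSHOT_ENABLED set to true in its
configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class SnapshotWorkflowSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          // offline snapshot: the table must be disabled first
          admin.disableTable("mytable");
          admin.snapshot("mysnapshot", "mytable");

          // roll the table back to the snapshot state, then bring it online again
          admin.restoreSnapshot("mysnapshot");
          admin.enableTable("mytable");

          // materialize the snapshot as a new table (hfiles are shared via links)
          admin.cloneSnapshot("mysnapshot", "mytable-clone");
          admin.deleteSnapshot("mysnapshot");
        } finally {
          admin.close();
        }
      }
    }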