Posted to commits@hbase.apache.org by mb...@apache.org on 2013/03/04 12:24:53 UTC

svn commit: r1452257 [2/14] - in /hbase/branches/0.94: security/src/main/java/org/apache/hadoop/hbase/security/access/ security/src/test/java/org/apache/hadoop/hbase/security/access/ src/main/jamon/org/apache/hadoop/hbase/tmpl/master/ src/main/java/org...

Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The ForeignExceptionListener is an interface for objects that can receive a ForeignException.
+ * <p>
+ * Implementations must be thread-safe, because this is expected to be used to propagate exceptions
+ * from foreign threads.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface ForeignExceptionListener {
+
+  /**
+   * Receive a ForeignException.
+   * <p>
+   * Implementers must ensure that this method is thread-safe.
+   * @param e exception causing the error. Implementations must accept and handle null here.
+   */
+  public void receive(ForeignException e);
+}
\ No newline at end of file

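For context, a minimal listener implementation might simply record the first exception it receives. This is a hypothetical sketch, not part of the commit:

    import org.apache.hadoop.hbase.errorhandling.ForeignException;
    import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;

    /** Hypothetical listener that keeps only the first exception received. */
    public class FirstExceptionListener implements ForeignExceptionListener {
      private ForeignException first = null;

      @Override
      public synchronized void receive(ForeignException e) {
        // The contract requires thread-safety and tolerating a null argument.
        if (first == null && e != null) {
          first = e;
        }
      }

      public synchronized ForeignException getFirst() {
        return first;
      }
    }
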
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is an interface for a cooperative exception-throwing mechanism.  Implementations are
+ * containers that hold an exception from a separate thread. This can be used to receive
+ * exceptions from 'foreign' threads or from separate 'foreign' processes.
+ * <p>
+ * To use, one would pass an implementation of this object to a long-running method and
+ * periodically check by calling {@link #rethrowException()}.  If any foreign exceptions have
+ * been received, the calling thread is then responsible for handling the rethrown exception.
+ * <p>
+ * Alternatively, the boolean {@link #hasException()} can be used to check whether an
+ * exception is present without throwing it.
+ * <p>
+ * NOTE: This is very similar to the InterruptedException/interrupt/interrupted pattern.  There,
+ * the notification state is bound to a Thread.  Here, applications receive exceptions in
+ * the snare.  The snare is referenced and checked by multiple threads, which enables exception
+ * notification in all the involved threads/processes.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ForeignExceptionSnare {
+
+  /**
+   * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is
+   * no exception, this is a no-op.
+   *
+   * @throws ForeignException
+   *           the exception received from a remote source, if one has been received
+   */
+  public void rethrowException() throws ForeignException;
+
+  /**
+   * Non-exceptional form of {@link #rethrowException()}. Checks to see if any
+   * process to which this snare is bound has created an error that
+   * would cause a failure.
+   *
+   * @return <tt>true</tt> if there has been an error, <tt>false</tt> otherwise
+   */
+  public boolean hasException();
+
+  /**
+   * Get the value of the captured exception.
+   *
+   * @return the captured foreign exception, or null if no exception has been captured.
+   */
+  public ForeignException getException();
+}

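The intended polling pattern described in the class comment looks roughly like this (hypothetical task; a concrete snare implementation is assumed to exist elsewhere):

    import org.apache.hadoop.hbase.errorhandling.ForeignException;
    import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;

    /** Hypothetical long-running task that cooperatively checks a snare. */
    public class SnareAwareTask {
      public void run(ForeignExceptionSnare snare) throws ForeignException {
        for (int step = 0; step < 100; step++) {
          // Rethrows if a foreign thread/process reported an error; no-op otherwise.
          snare.rethrowException();
          doExpensiveStep(step);
        }
      }

      private void doExpensiveStep(int step) {
        // ... application work ...
      }
    }
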
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Exception for timeout of a task.
+ * @see TimeoutExceptionInjector
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@SuppressWarnings("serial")
+public class TimeoutException extends Exception {
+
+  private final String sourceName;
+  private final long start;
+  private final long end;
+  private final long expected;
+
+  /**
+   * Exception indicating that an operation attempt has timed out.
+   * @param sourceName name of the source that timed out
+   * @param start time the operation started (ms since epoch)
+   * @param end time the timeout was triggered (ms since epoch)
+   * @param expected expected amount of time for the operation to complete (ms)
+   *          (ideally, expected &lt;= end-start)
+   */
+  public TimeoutException(String sourceName, long start, long end, long expected) {
+    super("Timeout elapsed! Source:" + sourceName + " Start:" + start + ", End:" + end
+        + ", diff:" + (end - start) + ", max:" + expected + " ms");
+    this.sourceName = sourceName;
+    this.start = start;
+    this.end = end;
+    this.expected = expected;
+  }
+
+  public long getStart() {
+    return start;
+  }
+
+  public long getEnd() {
+    return end;
+  }
+
+  public long getMaxAllowedOperationTime() {
+    return expected;
+  }
+
+  public String getSourceName() {
+    return sourceName;
+  }
+}
\ No newline at end of file

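A quick illustration of how the accessors line up with the constructor arguments (all values made up):

    import org.apache.hadoop.hbase.errorhandling.TimeoutException;

    public class TimeoutExceptionDemo {
      public static void main(String[] args) {
        long start = System.currentTimeMillis();
        long end = start + 75;           // pretend the timeout fired 75 ms in
        TimeoutException te = new TimeoutException("demo-op", start, end, 50);
        System.out.println(te.getSourceName());               // demo-op
        System.out.println(te.getMaxAllowedOperationTime());  // 50
        System.out.println(te.getMessage());                  // includes Start/End/diff/max
      }
    }
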
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import java.util.Timer;
+import java.util.TimerTask;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * Time a given process/operation and report a failure if the elapsed time exceeds the max allowed
+ * time.
+ * <p>
+ * The timer won't start tracking time until calling {@link #start()}. If {@link #complete()} or
+ * {@link #trigger()} is called before {@link #start()}, calls to {@link #start()} will fail.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TimeoutExceptionInjector {
+
+  private static final Log LOG = LogFactory.getLog(TimeoutExceptionInjector.class);
+
+  private final long maxTime;
+  private volatile boolean complete;
+  private final Timer timer;
+  private final TimerTask timerTask;
+  private long start = -1;
+
+  /**
+   * Create a generic timer for a task/process.
+   * @param listener listener to notify if the process times out
+   * @param maxTime max allowed running time for the process. Timer starts on calls to
+   *          {@link #start()}
+   */
+  public TimeoutExceptionInjector(final ForeignExceptionListener listener, final long maxTime) {
+    this.maxTime = maxTime;
+    timer = new Timer();
+    timerTask = new TimerTask() {
+      @Override
+      public void run() {
+        // ensure we don't run this task multiple times
+        synchronized (this) {
+          // quick exit if we already marked the task complete
+          if (TimeoutExceptionInjector.this.complete) return;
+          // mark the task as run, to avoid repeats
+          TimeoutExceptionInjector.this.complete = true;
+        }
+        long end = EnvironmentEdgeManager.currentTimeMillis();
+        TimeoutException tee = new TimeoutException(
+            "Timeout caused Foreign Exception", start, end, maxTime);
+        String source = "timer-" + timer;
+        listener.receive(new ForeignException(source, tee));
+      }
+    };
+  }
+
+  public long getMaxTime() {
+    return maxTime;
+  }
+
+  /**
+   * Mark the process as complete; from this point on the timer will never raise an error.
+   */
+  public void complete() {
+    // warn if the timer is already marked complete. This isn't going to be thread-safe, but should
+    // be good enough, and it's not worth locking just for a warning.
+    if (this.complete) {
+      LOG.warn("Timer already marked completed, ignoring!");
+      return;
+    }
+    LOG.debug("Marking timer as complete - no error notifications will be received for this timer.");
+    synchronized (this.timerTask) {
+      this.complete = true;
+    }
+    this.timer.cancel();
+  }
+
+  /**
+   * Start a timer to fail a process if it takes longer than the expected time to complete.
+   * <p>
+   * Non-blocking.
+   * @throws IllegalStateException if the timer has already been marked done via {@link #complete()}
+   *           or {@link #trigger()}
+   */
+  public synchronized void start() throws IllegalStateException {
+    if (this.start >= 0) {
+      LOG.warn("Timer already started, can't be started again. Ignoring second request.");
+      return;
+    }
+    LOG.debug("Scheduling process timer to run in: " + maxTime + " ms");
+    timer.schedule(timerTask, maxTime);
+    this.start = EnvironmentEdgeManager.currentTimeMillis();
+  }
+
+  /**
+   * Trigger the timer immediately.
+   * <p>
+   * Exposed for testing.
+   */
+  public void trigger() {
+    synchronized (timerTask) {
+      if (this.complete) {
+        LOG.warn("Timer already completed, not triggering.");
+        return;
+      }
+      LOG.debug("Triggering timer immediately!");
+      this.timer.cancel();
+      this.timerTask.run();
+    }
+  }
+}
\ No newline at end of file

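A minimal sketch of the start/complete lifecycle, with a throwaway listener (hypothetical code, not part of the patch):

    import org.apache.hadoop.hbase.errorhandling.ForeignException;
    import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;
    import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;

    public class InjectorDemo {
      public static void main(String[] args) throws Exception {
        ForeignExceptionListener listener = new ForeignExceptionListener() {
          @Override
          public void receive(ForeignException e) {
            System.err.println("operation timed out: " + e);
          }
        };
        TimeoutExceptionInjector timer = new TimeoutExceptionInjector(listener, 1000);
        timer.start();     // the timeout clock begins here
        Thread.sleep(100); // stand-in for the guarded operation
        timer.complete();  // success: the timer can no longer fire
      }
    }

Had the operation taken longer than 1000 ms, the timer task would instead have delivered a ForeignException wrapping a TimeoutException to the listener.
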
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java Mon Mar  4 11:24:50 2013
@@ -128,6 +128,8 @@ public abstract class EventHandler imple
     C_M_DELETE_FAMILY         (45),   // Client asking Master to delete family of table
     C_M_MODIFY_FAMILY         (46),   // Client asking Master to modify family of table
     C_M_CREATE_TABLE          (47),   // Client asking Master to create a table
+    C_M_SNAPSHOT_TABLE        (48),   // Client asking Master to snapshot an offline table
+    C_M_RESTORE_SNAPSHOT      (49),   // Client asking Master to restore a snapshot
 
     // Updates from master to ZK. This is done by the master and there is
     // nothing to process by either Master or RS

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java Mon Mar  4 11:24:50 2013
@@ -136,6 +136,8 @@ public class ExecutorService {
       case C_M_ENABLE_TABLE:
       case C_M_MODIFY_TABLE:
       case C_M_CREATE_TABLE:
+      case C_M_SNAPSHOT_TABLE:
+      case C_M_RESTORE_SNAPSHOT:
         return ExecutorType.MASTER_TABLE_OPERATIONS;
 
       // RegionServer executor services

Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/FileLink.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/FileLink.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/FileLink.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/FileLink.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,455 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.util.Collection;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.FileNotFoundException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+
+/**
+ * The FileLink is a sort of hardlink that allows a file to be accessed given a set of locations.
+ *
+ * <p><b>The Problem:</b>
+ * <ul>
+ *  <li>
+ *    HDFS doesn't support hardlinks, which makes it impossible to reference
+ *    the same data blocks using different names.
+ *  </li>
+ *  <li>
+ *    HBase stores files in one location (e.g. table/region/family/) and, when a file is no
+ *    longer needed (e.g. compaction, region deletion, ...), moves it to an archive directory.
+ *  </li>
+ * </ul>
+ * If we want to create a reference to a file, we need to remember that it can be in its
+ * original location or in the archive folder.
+ * The FileLink class abstracts this concept: given a set of locations,
+ * it switches between them transparently to the user.
+ * More concrete implementations of the FileLink are {@link HFileLink} and {@link HLogLink}.
+ *
+ * <p><b>Back-references:</b>
+ * To help the {@link CleanerChore} keep track of the links to a particular file,
+ * during the FileLink creation, a new file is placed inside a back-reference directory.
+ * There's one back-reference directory for each file that has links,
+ * and in the directory there's one file per link.
+ *
+ * <p>HFileLink Example
+ * <ul>
+ *  <li>
+ *      /hbase/table/region-x/cf/file-k
+ *      (Original File)
+ *  </li>
+ *  <li>
+ *      /hbase/table-cloned/region-y/cf/file-k.region-x.table
+ *     (HFileLink to the original file)
+ *  </li>
+ *  <li>
+ *      /hbase/table-2nd-cloned/region-z/cf/file-k.region-x.table
+ *      (HFileLink to the original file)
+ *  </li>
+ *  <li>
+ *      /hbase/.archive/table/region-x/.links-file-k/region-y.table-cloned
+ *      (Back-reference to the link in table-cloned)
+ *  </li>
+ *  <li>
+ *      /hbase/.archive/table/region-x/.links-file-k/region-z.table-cloned
+ *      (Back-reference to the link in table-2nd-cloned)
+ *  </li>
+ * </ul>
+ */
+@InterfaceAudience.Private
+public class FileLink {
+  private static final Log LOG = LogFactory.getLog(FileLink.class);
+
+  /** Define the Back-reference directory name prefix: .links-<hfile>/ */
+  public static final String BACK_REFERENCES_DIRECTORY_PREFIX = ".links-";
+
+  /**
+   * FileLink InputStream that handles the switch between the original path
+   * and the alternative locations, when the file is moved.
+   */
+  private static class FileLinkInputStream extends InputStream
+      implements Seekable, PositionedReadable {
+    private FSDataInputStream in = null;
+    private Path currentPath = null;
+    private long pos = 0;
+
+    private final FileLink fileLink;
+    private final int bufferSize;
+    private final FileSystem fs;
+
+    public FileLinkInputStream(final FileSystem fs, final FileLink fileLink)
+        throws IOException {
+      this(fs, fileLink, fs.getConf().getInt("io.file.buffer.size", 4096));
+    }
+
+    public FileLinkInputStream(final FileSystem fs, final FileLink fileLink, int bufferSize)
+        throws IOException {
+      this.bufferSize = bufferSize;
+      this.fileLink = fileLink;
+      this.fs = fs;
+
+      this.in = tryOpen();
+    }
+
+    @Override
+    public int read() throws IOException {
+      int res;
+      try {
+        res = in.read();
+      } catch (FileNotFoundException e) {
+        res = tryOpen().read();
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        res = tryOpen().read();
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        res = tryOpen().read();
+      }
+      if (res > 0) pos += 1;
+      return res;
+    }
+
+    @Override
+    public int read(byte b[]) throws IOException {
+       return read(b, 0, b.length);
+    }
+
+    @Override
+    public int read(byte b[], int off, int len) throws IOException {
+      int n;
+      try {
+        n = in.read(b, off, len);
+      } catch (FileNotFoundException e) {
+        n = tryOpen().read(b, off, len);
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        n = tryOpen().read(b, off, len);
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        n = tryOpen().read(b, off, len);
+      }
+      if (n > 0) pos += n;
+      assert(in.getPos() == pos);
+      return n;
+    }
+
+    @Override
+    public int read(long position, byte[] buffer, int offset, int length) throws IOException {
+      int n;
+      try {
+        n = in.read(position, buffer, offset, length);
+      } catch (FileNotFoundException e) {
+        n = tryOpen().read(position, buffer, offset, length);
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        n = tryOpen().read(position, buffer, offset, length);
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        n = tryOpen().read(position, buffer, offset, length);
+      }
+      return n;
+    }
+
+    @Override
+    public void readFully(long position, byte[] buffer) throws IOException {
+      readFully(position, buffer, 0, buffer.length);
+    }
+
+    @Override
+    public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
+      try {
+        in.readFully(position, buffer, offset, length);
+      } catch (FileNotFoundException e) {
+        tryOpen().readFully(position, buffer, offset, length);
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        tryOpen().readFully(position, buffer, offset, length);
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        tryOpen().readFully(position, buffer, offset, length);
+      }
+    }
+
+    @Override
+    public long skip(long n) throws IOException {
+      long skipped;
+
+      try {
+        skipped = in.skip(n);
+      } catch (FileNotFoundException e) {
+        skipped = tryOpen().skip(n);
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        skipped = tryOpen().skip(n);
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        skipped = tryOpen().skip(n);
+      }
+
+      if (skipped > 0) pos += skipped;
+      return skipped;
+    }
+
+    @Override
+    public int available() throws IOException {
+      try {
+        return in.available();
+      } catch (FileNotFoundException e) {
+        return tryOpen().available();
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        return tryOpen().available();
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        return tryOpen().available();
+      }
+    }
+
+    @Override
+    public void seek(long pos) throws IOException {
+      try {
+        in.seek(pos);
+      } catch (FileNotFoundException e) {
+        tryOpen().seek(pos);
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        tryOpen().seek(pos);
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        tryOpen().seek(pos);
+      }
+      this.pos = pos;
+    }
+
+    @Override
+    public long getPos() throws IOException {
+      return pos;
+    }
+
+    @Override
+    public boolean seekToNewSource(long targetPos) throws IOException {
+      boolean res;
+      try {
+        res = in.seekToNewSource(targetPos);
+      } catch (FileNotFoundException e) {
+        res = tryOpen().seekToNewSource(targetPos);
+      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
+        res = tryOpen().seekToNewSource(targetPos);
+      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
+        res = tryOpen().seekToNewSource(targetPos);
+      }
+      if (res) pos = targetPos;
+      return res;
+    }
+
+    @Override
+    public void close() throws IOException {
+      in.close();
+    }
+
+    @Override
+    public synchronized void mark(int readlimit) {
+    }
+
+    @Override
+    public synchronized void reset() throws IOException {
+      throw new IOException("mark/reset not supported");
+    }
+
+    @Override
+    public boolean markSupported() {
+      return false;
+    }
+
+    /**
+     * Try to open the file from one of the available locations.
+     *
+     * @return FSDataInputStream stream of the opened file link
+     * @throws IOException on unexpected error, or file not found.
+     */
+    private FSDataInputStream tryOpen() throws IOException {
+      for (Path path: fileLink.getLocations()) {
+        if (path.equals(currentPath)) continue;
+        try {
+          in = fs.open(path, bufferSize);
+          in.seek(pos);
+          assert(in.getPos() == pos) : "Link unable to seek to the right position=" + pos;
+          if (LOG.isTraceEnabled()) {
+            if (currentPath == null) {
+              // first open of the link
+              LOG.trace("link open path=" + path);
+            } else {
+              // the file moved: switching to an alternative location
+              LOG.trace("link switch from path=" + currentPath + " to path=" + path);
+            }
+          }
+          currentPath = path;
+          return(in);
+        } catch (FileNotFoundException e) {
+          // Try another file location
+        }
+      }
+      throw new FileNotFoundException("Unable to open link: " + fileLink);
+    }
+  }
+
+  private Path[] locations = null;
+
+  protected FileLink() {
+    this.locations = null;
+  }
+
+  /**
+   * @param originPath Original location of the file to link
+   * @param alternativePaths Alternative locations to look for the linked file
+   */
+  public FileLink(Path originPath, Path... alternativePaths) {
+    setLocations(originPath, alternativePaths);
+  }
+
+  /**
+   * @param locations locations to look for the linked file
+   */
+  public FileLink(final Collection<Path> locations) {
+    this.locations = locations.toArray(new Path[locations.size()]);
+  }
+
+  /**
+   * @return the locations to look for the linked file.
+   */
+  public Path[] getLocations() {
+    return locations;
+  }
+
+  public String toString() {
+    StringBuilder str = new StringBuilder(getClass().getName());
+    str.append(" locations=[");
+    int i = 0;
+    for (Path location: locations) {
+      if (i++ > 0) str.append(", ");
+      str.append(location.toString());
+    }
+    str.append("]");
+    return str.toString();
+  }
+
+  /**
+   * @return the path of the first available link.
+   */
+  public Path getAvailablePath(FileSystem fs) throws IOException {
+    for (Path path: locations) {
+      if (fs.exists(path)) {
+        return path;
+      }
+    }
+    throw new FileNotFoundException("Unable to open link: " + this);
+  }
+
+  /**
+   * Get the FileStatus of the referenced file.
+   *
+   * @param fs {@link FileSystem} on which to get the file status
+   * @return FileStatus of the referenced file.
+   * @throws IOException on unexpected error.
+   */
+  public FileStatus getFileStatus(FileSystem fs) throws IOException {
+    for (Path path: locations) {
+      try {
+        return fs.getFileStatus(path);
+      } catch (FileNotFoundException e) {
+        // Try another file location
+      }
+    }
+    throw new FileNotFoundException("Unable to open link: " + this);
+  }
+
+  /**
+   * Open the FileLink for read.
+   * <p>
+   * It uses a wrapper of FSDataInputStream that is agnostic to the location
+   * of the file, even if the file switches between locations.
+   *
+   * @param fs {@link FileSystem} on which to open the FileLink
+   * @return InputStream for reading the file link.
+   * @throws IOException on unexpected error.
+   */
+  public FSDataInputStream open(final FileSystem fs) throws IOException {
+    return new FSDataInputStream(new FileLinkInputStream(fs, this));
+  }
+
+  /**
+   * Open the FileLink for read.
+   * <p>
+   * It uses a wrapper of FSDataInputStream that is agnostic to the location
+   * of the file, even if the file switches between locations.
+   *
+   * @param fs {@link FileSystem} on which to open the FileLink
+   * @param bufferSize the size of the buffer to be used.
+   * @return InputStream for reading the file link.
+   * @throws IOException on unexpected error.
+   */
+  public FSDataInputStream open(final FileSystem fs, int bufferSize) throws IOException {
+    return new FSDataInputStream(new FileLinkInputStream(fs, this, bufferSize));
+  }
+
+  /**
+   * NOTE: This method must be used only in the constructor!
+   * It sets the specified locations for the link.
+   */
+  protected void setLocations(Path originPath, Path... alternativePaths) {
+    assert this.locations == null : "Link locations already set";
+    this.locations = new Path[1 + alternativePaths.length];
+    this.locations[0] = originPath;
+    for (int i = 0; i < alternativePaths.length; i++) {
+      this.locations[i + 1] = alternativePaths[i];
+    }
+  }
+
+  /**
+   * Get the directory to store the link back references
+   *
+   * <p>To simplify the reference count process, during the FileLink creation
+   * a back-reference is added to the back-reference directory of the specified file.
+   *
+   * @param storeDir Root directory for the link reference folder
+   * @param fileName File Name with links
+   * @return Path for the link back references.
+   */
+  public static Path getBackReferencesDir(final Path storeDir, final String fileName) {
+    return new Path(storeDir, BACK_REFERENCES_DIRECTORY_PREFIX + fileName);
+  }
+
+  /**
+   * Get the referenced file name from the reference link directory path.
+   *
+   * @param dirPath Link references directory path
+   * @return Name of the file referenced
+   */
+  public static String getBackReferenceFileName(final Path dirPath) {
+    return dirPath.getName().substring(BACK_REFERENCES_DIRECTORY_PREFIX.length());
+  }
+
+  /**
+   * Checks if the specified directory path is a back reference links folder.
+   *
+   * @param dirPath Directory path to verify
+   * @return True if the specified directory is a link references folder
+   */
+  public static boolean isBackReferencesDir(final Path dirPath) {
+    if (dirPath == null) return false;
+    return dirPath.getName().startsWith(BACK_REFERENCES_DIRECTORY_PREFIX);
+  }
+}
+

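A sketch of how a reader might consume a link whose target can live in more than one place (paths are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.FileLink;

    public class FileLinkDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // The file may still be in its original location, or already archived.
        FileLink link = new FileLink(new Path("/hbase/table/region-x/cf/file-k"),
            new Path("/hbase/.archive/table/region-x/cf/file-k"));
        FSDataInputStream in = link.open(fs); // follows the file if it moves mid-read
        try {
          System.out.println("first byte: " + in.read());
        } finally {
          in.close();
        }
      }
    }
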
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,371 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+
+/**
+ * HFileLink describes a link to an hfile.
+ *
+ * An hfile can be served from a region or from the hfile archive directory (/hbase/.archive).
+ * HFileLink allows access to the referenced hfile regardless of its current location.
+ *
+ * <p>Searches for hfiles in the following order and locations:
+ * <ul>
+ *  <li>/hbase/table/region/cf/hfile</li>
+ *  <li>/hbase/.archive/table/region/cf/hfile</li>
+ * </ul>
+ *
+ * The link checks the original path first; if the file is not present there,
+ * it falls back to the archived path.
+ */
+@InterfaceAudience.Private
+public class HFileLink extends FileLink {
+  private static final Log LOG = LogFactory.getLog(HFileLink.class);
+
+  /**
+   * A non-capture group, for HFileLink, so that this can be embedded.
+   * The HFileLink describes a link to an hfile in a different table/region
+   * and the name is in the form: table=region-hfile.
+   * <p>
+   * Table name is ([a-zA-Z_0-9][a-zA-Z_0-9.-]*), so '=' is an invalid character for the table name.
+   * Region name is ([a-f0-9]+), so '-' is an invalid character for the region name.
+   * HFile is ([0-9a-f]+(?:_SeqId_[0-9]+_)?) covering the plain hfiles (uuid)
+   * and the bulk loaded (_SeqId_[0-9]+_) hfiles.
+   */
+  public static final String LINK_NAME_REGEX =
+    String.format("%s=%s-%s", HTableDescriptor.VALID_USER_TABLE_REGEX,
+      HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFile.HFILE_NAME_REGEX);
+
+  /** Define the HFile Link name parser in the form of: table=region-hfile */
+  private static final Pattern LINK_NAME_PATTERN =
+    Pattern.compile(String.format("^(%s)=(%s)-(%s)$", HTableDescriptor.VALID_USER_TABLE_REGEX,
+      HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFile.HFILE_NAME_REGEX));
+
+  /**
+   * The pattern should be used for hfile and reference links
+   * that can be found in /hbase/table/region/family/
+   */
+  private static final Pattern REF_OR_HFILE_LINK_PATTERN =
+    Pattern.compile(String.format("^(%s)=(%s)-(.+)$", HTableDescriptor.VALID_USER_TABLE_REGEX,
+      HRegionInfo.ENCODED_REGION_NAME_REGEX));
+
+  private final Path archivePath;
+  private final Path originPath;
+  private final Path tempPath;
+
+  /**
+   * @param conf {@link Configuration} from which to extract specific archive locations
+   * @param path The path of the HFile Link.
+   * @throws IOException on unexpected error.
+   */
+  public HFileLink(Configuration conf, Path path) throws IOException {
+    this(FSUtils.getRootDir(conf), HFileArchiveUtil.getArchivePath(conf), path);
+  }
+
+  /**
+   * @param rootDir Path to the root directory where hbase files are stored
+   * @param archiveDir Path to the hbase archive directory
+   * @param path The path of the HFile Link.
+   */
+  public HFileLink(final Path rootDir, final Path archiveDir, final Path path) {
+    Path hfilePath = getRelativeTablePath(path);
+    this.tempPath = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath);
+    this.originPath = new Path(rootDir, hfilePath);
+    this.archivePath = new Path(archiveDir, hfilePath);
+    setLocations(originPath, tempPath, archivePath);
+  }
+
+  /**
+   * @return the origin path of the hfile.
+   */
+  public Path getOriginPath() {
+    return this.originPath;
+  }
+
+  /**
+   * @return the path of the archived hfile.
+   */
+  public Path getArchivePath() {
+    return this.archivePath;
+  }
+
+  /**
+   * @param path Path to check.
+   * @return True if the path is a HFileLink.
+   */
+  public static boolean isHFileLink(final Path path) {
+    return isHFileLink(path.getName());
+  }
+
+
+  /**
+   * @param fileName File name to check.
+   * @return True if the path is a HFileLink.
+   */
+  public static boolean isHFileLink(String fileName) {
+    Matcher m = LINK_NAME_PATTERN.matcher(fileName);
+    if (!m.matches()) return false;
+
+    return m.groupCount() > 2 && m.group(3) != null && m.group(2) != null && m.group(1) != null;
+  }
+
+  /**
+   * Convert an HFileLink path to a table-relative path.
+   * e.g. the link: /hbase/test/0123/cf/testtb=4567-abcd
+   *      becomes: testtb/4567/cf/abcd
+   *
+   * @param path HFileLink path
+   * @return Relative table path
+   * @throws IllegalArgumentException if the path is not a valid HFileLink name
+   */
+  private static Path getRelativeTablePath(final Path path) {
+    // table=region-hfile
+    Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(path.getName());
+    if (!m.matches()) {
+      throw new IllegalArgumentException(path.getName() + " is not a valid HFileLink name!");
+    }
+
+    // Convert the HFileLink name into a real table/region/cf/hfile path.
+    String tableName = m.group(1);
+    String regionName = m.group(2);
+    String hfileName = m.group(3);
+    String familyName = path.getParent().getName();
+    return new Path(new Path(tableName, regionName), new Path(familyName, hfileName));
+  }
+
+  /**
+   * Get the HFile name of the referenced link
+   *
+   * @param fileName HFileLink file name
+   * @return the name of the referenced HFile
+   */
+  public static String getReferencedHFileName(final String fileName) {
+    Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
+    if (!m.matches()) {
+      throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
+    }
+    return(m.group(3));
+  }
+
+  /**
+   * Get the Region name of the referenced link
+   *
+   * @param fileName HFileLink file name
+   * @return the name of the referenced Region
+   */
+  public static String getReferencedRegionName(final String fileName) {
+    Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
+    if (!m.matches()) {
+      throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
+    }
+    return(m.group(2));
+  }
+
+  /**
+   * Get the Table name of the referenced link
+   *
+   * @param fileName HFileLink file name
+   * @return the name of the referenced Table
+   */
+  public static String getReferencedTableName(final String fileName) {
+    Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
+    if (!m.matches()) {
+      throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
+    }
+    return(m.group(1));
+  }
+
+  /**
+   * Create a new HFileLink name
+   *
+   * @param hfileRegionInfo - Linked HFile Region Info
+   * @param hfileName - Linked HFile name
+   * @return file name of the HFile Link
+   */
+  public static String createHFileLinkName(final HRegionInfo hfileRegionInfo,
+      final String hfileName) {
+    return createHFileLinkName(hfileRegionInfo.getTableNameAsString(),
+                      hfileRegionInfo.getEncodedName(), hfileName);
+  }
+
+  /**
+   * Create a new HFileLink name
+   *
+   * @param tableName - Linked HFile table name
+   * @param regionName - Linked HFile region name
+   * @param hfileName - Linked HFile name
+   * @return file name of the HFile Link
+   */
+  public static String createHFileLinkName(final String tableName,
+      final String regionName, final String hfileName) {
+    return String.format("%s=%s-%s", tableName, regionName, hfileName);
+  }
+
+  /**
+   * Create a new HFileLink
+   *
+   * <p>It also adds a back-reference to the hfile back-reference directory
+   * to simplify the reference-count and the cleaning process.
+   *
+   * @param conf {@link Configuration} to read for the archive directory name
+   * @param fs {@link FileSystem} on which to write the HFileLink
+   * @param dstFamilyPath - Destination path (table/region/cf/)
+   * @param hfileRegionInfo - Linked HFile Region Info
+   * @param hfileName - Linked HFile name
+   * @return true if the file is created, false if the file already exists.
+   * @throws IOException on file or parent directory creation failure
+   */
+  public static boolean create(final Configuration conf, final FileSystem fs,
+      final Path dstFamilyPath, final HRegionInfo hfileRegionInfo,
+      final String hfileName) throws IOException {
+    String linkedTable = hfileRegionInfo.getTableNameAsString();
+    String linkedRegion = hfileRegionInfo.getEncodedName();
+    return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName);
+  }
+
+  /**
+   * Create a new HFileLink
+   *
+   * <p>It also adds a back-reference to the hfile back-reference directory
+   * to simplify the reference-count and the cleaning process.
+   *
+   * @param conf {@link Configuration} to read for the archive directory name
+   * @param fs {@link FileSystem} on which to write the HFileLink
+   * @param dstFamilyPath - Destination path (table/region/cf/)
+   * @param linkedTable - Linked Table Name
+   * @param linkedRegion - Linked Region Name
+   * @param hfileName - Linked HFile name
+   * @return true if the file is created, false if the file already exists.
+   * @throws IOException on file or parent directory creation failure
+   */
+  public static boolean create(final Configuration conf, final FileSystem fs,
+      final Path dstFamilyPath, final String linkedTable, final String linkedRegion,
+      final String hfileName) throws IOException {
+    String familyName = dstFamilyPath.getName();
+    String regionName = dstFamilyPath.getParent().getName();
+    String tableName = dstFamilyPath.getParent().getParent().getName();
+
+    String name = createHFileLinkName(linkedTable, linkedRegion, hfileName);
+    String refName = createBackReferenceName(tableName, regionName);
+
+    // Make sure the destination directory exists
+    fs.mkdirs(dstFamilyPath);
+
+    // Make sure the FileLink reference directory exists
+    Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
+          linkedTable, linkedRegion, familyName);
+    Path backRefsDir = getBackReferencesDir(archiveStoreDir, hfileName);
+    fs.mkdirs(backRefsDir);
+
+    // Create the reference for the link
+    Path backRefPath = new Path(backRefsDir, refName);
+    fs.createNewFile(backRefPath);
+    try {
+      // Create the link
+      return fs.createNewFile(new Path(dstFamilyPath, name));
+    } catch (IOException e) {
+      LOG.error("couldn't create the link=" + name + " for " + dstFamilyPath, e);
+      // Revert the reference if the link creation failed
+      fs.delete(backRefPath, false);
+      throw e;
+    }
+  }
+
+  /**
+   * Create a new HFileLink starting from a hfileLink name
+   *
+   * <p>It also adds a back-reference to the hfile back-reference directory
+   * to simplify the reference-count and the cleaning process.
+   *
+   * @param conf {@link Configuration} to read for the archive directory name
+   * @param fs {@link FileSystem} on which to write the HFileLink
+   * @param dstFamilyPath - Destination path (table/region/cf/)
+   * @param hfileLinkName - HFileLink name (in the form table=region-hfile)
+   * @return true if the file is created, false if the file already exists.
+   * @throws IOException on file or parent directory creation failure
+   */
+  public static boolean createFromHFileLink(final Configuration conf, final FileSystem fs,
+      final Path dstFamilyPath, final String hfileLinkName) throws IOException {
+    Matcher m = LINK_NAME_PATTERN.matcher(hfileLinkName);
+    if (!m.matches()) {
+      throw new IllegalArgumentException(hfileLinkName + " is not a valid HFileLink name!");
+    }
+    return create(conf, fs, dstFamilyPath, m.group(1), m.group(2), m.group(3));
+  }
+
+  /**
+   * Create the back reference name
+   */
+  private static String createBackReferenceName(final String tableName, final String regionName) {
+    return regionName + "." + tableName;
+  }
+
+  /**
+   * Get the full path of the HFile referenced by the back reference
+   *
+   * @param rootDir root hbase directory
+   * @param linkRefPath Link Back Reference path
+   * @return full path of the referenced hfile
+   */
+  public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
+    int separatorIndex = linkRefPath.getName().indexOf('.');
+    String linkRegionName = linkRefPath.getName().substring(0, separatorIndex);
+    String linkTableName = linkRefPath.getName().substring(separatorIndex + 1);
+    String hfileName = getBackReferenceFileName(linkRefPath.getParent());
+    Path familyPath = linkRefPath.getParent().getParent();
+    Path regionPath = familyPath.getParent();
+    Path tablePath = regionPath.getParent();
+
+    String linkName = createHFileLinkName(tablePath.getName(), regionPath.getName(), hfileName);
+    Path linkTableDir = FSUtils.getTablePath(rootDir, linkTableName);
+    Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
+    return new Path(new Path(regionDir, familyPath.getName()), linkName);
+  }
+
+  /**
+   * Get the full path of the HFile referenced by the back reference
+   *
+   * @param conf {@link Configuration} to read for the archive directory name
+   * @param linkRefPath Link Back Reference path
+   * @return full path of the referenced hfile
+   * @throws IOException on unexpected error.
+   */
+  public static Path getHFileFromBackReference(final Configuration conf, final Path linkRefPath)
+      throws IOException {
+    return getHFileFromBackReference(FSUtils.getRootDir(conf), linkRefPath);
+  }
+}

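To make the name format concrete, a sketch using the static helpers above (all names made up):

    import org.apache.hadoop.hbase.io.HFileLink;

    public class HFileLinkNameDemo {
      public static void main(String[] args) {
        // A link name encodes table=region-hfile.
        String name = HFileLink.createHFileLinkName("testtb", "4567abcd", "0123456789abcdef");
        System.out.println(name);                                    // testtb=4567abcd-0123456789abcdef
        System.out.println(HFileLink.isHFileLink(name));             // true
        System.out.println(HFileLink.getReferencedTableName(name));  // testtb
        System.out.println(HFileLink.getReferencedRegionName(name)); // 4567abcd
        System.out.println(HFileLink.getReferencedHFileName(name));  // 0123456789abcdef
      }
    }
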
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * HLogLink describes a link to a WAL.
+ *
+ * An hlog can be in /hbase/.logs/<server>/<hlog>
+ * or it can be in /hbase/.oldlogs/<hlog>.
+ *
+ * The link checks the original path first;
+ * if the file is not present there, it falls back to the archived path.
+ */
+@InterfaceAudience.Private
+public class HLogLink extends FileLink {
+  /**
+   * @param conf {@link Configuration} from which to extract specific archive locations
+   * @param serverName Region Server owner of the log
+   * @param logName WAL file name
+   * @throws IOException on unexpected error.
+   */
+  public HLogLink(final Configuration conf,
+      final String serverName, final String logName) throws IOException {
+    this(FSUtils.getRootDir(conf), serverName, logName);
+  }
+
+  /**
+   * @param rootDir Path to the root directory where hbase files are stored
+   * @param serverName Region Server owner of the log
+   * @param logName WAL file name
+   */
+  public HLogLink(final Path rootDir, final String serverName, final String logName) {
+    final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+    final Path logDir = new Path(new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), serverName);
+    setLocations(new Path(logDir, logName), new Path(oldLogDir, logName));
+  }
+
+  /**
+   * @param originPath Path to the wal in the log directory
+   * @param archivePath Path to the wal in the archived log directory
+   */
+  public HLogLink(final Path originPath, final Path archivePath) {
+    setLocations(originPath, archivePath);
+  }
+}

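Usage sketch (server and log names are made up):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.HLogLink;

    public class HLogLinkDemo {
      public static void main(String[] args) {
        // Resolves against /hbase/.logs/<server>/<hlog> first, then /hbase/.oldlogs/<hlog>.
        HLogLink link = new HLogLink(new Path("/hbase"),
            "rs1.example.com,60020,1362000000000",
            "rs1.example.com%2C60020%2C1362000000000.1362123456789");
        for (Path p : link.getLocations()) {
          System.out.println(p);
        }
      }
    }
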
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java Mon Mar  4 11:24:50 2013
@@ -58,10 +58,12 @@ public class HalfStoreFileReader extends
   private boolean firstKeySeeked = false;
 
   /**
-   * @param fs
-   * @param p
+   * Creates a half file reader for a normal hfile.
+   * @param fs filesystem to read from
+   * @param p path to hfile
    * @param cacheConf
-   * @param r
+   * @param r original reference file (contains top or bottom)
+   * @param preferredEncodingInCache
    * @throws IOException
    */
   public HalfStoreFileReader(final FileSystem fs, final Path p,
@@ -78,6 +80,30 @@ public class HalfStoreFileReader extends
     this.top = Reference.isTopFileRegion(r.getFileRegion());
   }
 
+  /**
+   * Creates a half file reader for a hfile referred to by an hfilelink.
+   * @param fs filesystem to read from
+   * @param p path to hfile
+   * @param link
+   * @param cacheConf
+   * @param r original reference file (contains top or bottom)
+   * @param preferredEncodingInCache
+   * @throws IOException
+   */
+  public HalfStoreFileReader(final FileSystem fs, final Path p, final HFileLink link,
+      final CacheConfig cacheConf, final Reference r,
+      DataBlockEncoding preferredEncodingInCache) throws IOException {
+    super(fs, p, link, link.getFileStatus(fs).getLen(), cacheConf, preferredEncodingInCache, true);
+    // This is not the actual midkey for this half-file; it's just the border
+    // around which we split top and bottom.  Have to look in the files to find
+    // the actual first and last keys for the bottom and top halves.  Half-files don't
+    // have an actual midkey themselves. No midkey is how we indicate a file is
+    // not splittable.
+    this.splitkey = r.getSplitKey();
+    // Is it top or bottom half?
+    this.top = Reference.isTopFileRegion(r.getFileRegion());
+  }
+
   protected boolean isTop() {
     return this.top;
   }

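A sketch of wiring the new link-aware constructor (assumes a Reference and a Configuration are already in hand; names are hypothetical):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.HFileLink;
    import org.apache.hadoop.hbase.io.HalfStoreFileReader;
    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class HalfReaderDemo {
      /** Open a half reader that reads through an HFileLink. */
      public HalfStoreFileReader openHalf(FileSystem fs, Path refPath, HFileLink link,
          Reference r, Configuration conf) throws IOException {
        CacheConfig cacheConf = new CacheConfig(conf);
        // The link resolves the hfile even if it migrates to the archive.
        return new HalfStoreFileReader(fs, refPath, link, cacheConf, r,
            DataBlockEncoding.NONE);
      }
    }
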
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java Mon Mar  4 11:24:50 2013
@@ -90,6 +90,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ProtoUtil;
 import org.apache.hadoop.io.MapWritable;
@@ -269,6 +270,10 @@ public class HbaseObjectWritable impleme
     
     addToMap(FuzzyRowFilter.class, code++);
 
+    // We aren't going to bump the RPC version number:
+    // we don't want to cause incompatibility with older 0.94/0.92 clients.
+    addToMap(HSnapshotDescription.class, code);
+
     // make sure that this is the last statement in this static block
     NEXT_CLASS_CODE = code;
   }

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Mon Mar  4 11:24:50 2013
@@ -180,18 +180,18 @@ public class HFile {
   static final AtomicLong checksumFailures = new AtomicLong();
 
   // For getting more detailed stats on FS latencies
-  // If, for some reason, the metrics subsystem stops polling for latencies, 
+  // If, for some reason, the metrics subsystem stops polling for latencies,
   // I don't want data to pile up in a memory leak
   // so, after LATENCY_BUFFER_SIZE items have been enqueued for processing,
   // fs latency stats will be dropped (and this behavior will be logged)
   private static final int LATENCY_BUFFER_SIZE = 5000;
-  private static final BlockingQueue<Long> fsReadLatenciesNanos = 
+  private static final BlockingQueue<Long> fsReadLatenciesNanos =
       new ArrayBlockingQueue<Long>(LATENCY_BUFFER_SIZE);
-  private static final BlockingQueue<Long> fsWriteLatenciesNanos = 
+  private static final BlockingQueue<Long> fsWriteLatenciesNanos =
       new ArrayBlockingQueue<Long>(LATENCY_BUFFER_SIZE);
-  private static final BlockingQueue<Long> fsPreadLatenciesNanos = 
+  private static final BlockingQueue<Long> fsPreadLatenciesNanos =
       new ArrayBlockingQueue<Long>(LATENCY_BUFFER_SIZE);
-  
+
   public static final void offerReadLatency(long latencyNanos, boolean pread) {
     if (pread) {
       fsPreadLatenciesNanos.offer(latencyNanos); // might be silently dropped, if the queue is full
@@ -203,30 +203,30 @@ public class HFile {
       readOps.incrementAndGet();
     }
   }
-  
+
   public static final void offerWriteLatency(long latencyNanos) {
     fsWriteLatenciesNanos.offer(latencyNanos); // might be silently dropped, if the queue is full
-    
+
     writeTimeNano.addAndGet(latencyNanos);
     writeOps.incrementAndGet();
   }
-  
+
   public static final Collection<Long> getReadLatenciesNanos() {
-    final List<Long> latencies = 
+    final List<Long> latencies =
         Lists.newArrayListWithCapacity(fsReadLatenciesNanos.size());
     fsReadLatenciesNanos.drainTo(latencies);
     return latencies;
   }
 
   public static final Collection<Long> getPreadLatenciesNanos() {
-    final List<Long> latencies = 
+    final List<Long> latencies =
         Lists.newArrayListWithCapacity(fsPreadLatenciesNanos.size());
     fsPreadLatenciesNanos.drainTo(latencies);
     return latencies;
   }
-  
+
   public static final Collection<Long> getWriteLatenciesNanos() {
-    final List<Long> latencies = 
+    final List<Long> latencies =
         Lists.newArrayListWithCapacity(fsWriteLatenciesNanos.size());
     fsWriteLatenciesNanos.drainTo(latencies);
     return latencies;
@@ -572,7 +572,7 @@ public class HFile {
     HFileSystem hfs = null;
     FSDataInputStream fsdis = fs.open(path);
     FSDataInputStream fsdisNoFsChecksum = fsdis;
-    // If the fs is not an instance of HFileSystem, then create an 
+    // If the fs is not an instance of HFileSystem, then create an
     // instance of HFileSystem that wraps over the specified fs.
     // In this case, we will not be able to avoid checksumming inside
     // the filesystem.
@@ -592,6 +592,39 @@ public class HFile {
   }
 
   /**
+   * @param fs A file system
+   * @param path Path to HFile
+   * @param fsdis an open checksummed stream of path's file
+   * @param fsdisNoFsChecksum an open unchecksummed stream of path's file
+   * @param size max size of the trailer.
+   * @param cacheConf Cache configuration for hfile's contents
+   * @param preferredEncodingInCache Preferred in-cache data encoding algorithm.
+   * @param closeIStream whether to close the stream once the reader has been created.
+   * @return A version-specific HFile Reader
+   * @throws IOException If the file is invalid; a CorruptHFileException-flavored IOException is thrown
+   */
+  public static Reader createReaderWithEncoding(
+      FileSystem fs, Path path, FSDataInputStream fsdis,
+      FSDataInputStream fsdisNoFsChecksum, long size, CacheConfig cacheConf,
+      DataBlockEncoding preferredEncodingInCache, boolean closeIStream)
+      throws IOException {
+    HFileSystem hfs = null;
+
+    // If the fs is not an instance of HFileSystem, then create an
+    // instance of HFileSystem that wraps over the specified fs.
+    // In this case, we will not be able to avoid checksumming inside
+    // the filesystem.
+    if (!(fs instanceof HFileSystem)) {
+      hfs = new HFileSystem(fs);
+    } else {
+      hfs = (HFileSystem)fs;
+    }
+    return pickReaderVersion(path, fsdis, fsdisNoFsChecksum, size,
+                             closeIStream, cacheConf,
+                             preferredEncodingInCache, hfs);
+  }
+
+  /**
    * @param fs filesystem
    * @param path Path to file to read
    * @param cacheConf This must not be null.  @see {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#CacheConfig(Configuration)}

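The latency buffers above follow a simple bounded-queue pattern: offer() silently drops samples once LATENCY_BUFFER_SIZE is reached (so an idle metrics poller cannot cause a leak), and drainTo() hands the poller a batch while emptying the queue. A minimal, self-contained sketch of the same pattern (names are illustrative, not HBase API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    class BoundedLatencyBuffer {
      private static final int BUFFER_SIZE = 5000;
      private final BlockingQueue<Long> latenciesNanos =
          new ArrayBlockingQueue<Long>(BUFFER_SIZE);

      /** Record a sample; silently dropped when the buffer is full. */
      void offer(long latencyNanos) {
        latenciesNanos.offer(latencyNanos);
      }

      /** Drain all pending samples, freeing space for new ones. */
      List<Long> drain() {
        List<Long> out = new ArrayList<Long>(latenciesNanos.size());
        latenciesNanos.drainTo(out);
        return out;
      }
    }
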
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Mon Mar  4 11:24:50 2013
@@ -581,15 +581,21 @@ public class LruBlockCache implements Bl
     return this.stats.getEvictedCount();
   }
 
+  EvictionThread getEvictionThread() {
+    return this.evictionThread;
+  }
+
   /*
    * Eviction thread.  Sits in waiting state until an eviction is triggered
    * when the cache size grows above the acceptable level.<p>
    *
    * Thread is triggered into action by {@link LruBlockCache#runEviction()}
    */
-  private static class EvictionThread extends HasThread {
+  static class EvictionThread extends HasThread {
     private WeakReference<LruBlockCache> cache;
     private boolean go = true;
+    // flag set after entering the run method; used by tests
+    private boolean enteringRun = false;
 
     public EvictionThread(LruBlockCache cache) {
       super(Thread.currentThread().getName() + ".LruBlockCache.EvictionThread");
@@ -599,6 +605,7 @@ public class LruBlockCache implements Bl
 
     @Override
     public void run() {
+      enteringRun = true;
       while (this.go) {
         synchronized(this) {
           try {
@@ -621,6 +628,13 @@ public class LruBlockCache implements Bl
       this.go = false;
       interrupt();
     }
+
+    /**
+     * Used by tests.
+     */
+    boolean isEnteringRun() {
+      return this.enteringRun;
+    }
   }
 
   /*

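The widened visibility of EvictionThread and the new enteringRun flag exist so a test can be sure the eviction thread is actually parked inside run() before triggering an eviction. A hedged sketch of how such a test might wait (the LruBlockCache instance and surrounding test harness are assumed, not part of this patch):

    // In a test within the org.apache.hadoop.hbase.io.hfile package:
    LruBlockCache.EvictionThread evictionThread = cache.getEvictionThread();
    // A real test would bound this wait with a timeout.
    while (!evictionThread.isEnteringRun()) {
      Thread.sleep(1);
    }
    // Only now is it safe to trigger an eviction and assert on its effects.
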
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java Mon Mar  4 11:24:50 2013
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.co
 import org.apache.hadoop.hbase.client.coprocessor.ExecResult;
 import org.apache.hadoop.hbase.security.TokenInfo;
 import org.apache.hadoop.hbase.security.KerberosInfo;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -286,4 +287,22 @@ public interface HMasterInterface extend
    */
   public ExecResult execCoprocessor(Exec call)
       throws IOException;
+
+  public long snapshot(final HSnapshotDescription snapshot)
+    throws IOException;
+
+  public List<HSnapshotDescription> getCompletedSnapshots()
+    throws IOException;
+
+  public void deleteSnapshot(final HSnapshotDescription snapshot)
+    throws IOException;
+
+  public boolean isSnapshotDone(final HSnapshotDescription snapshot)
+    throws IOException;
+
+  public void restoreSnapshot(final HSnapshotDescription request)
+    throws IOException;
+
+  public boolean isRestoreSnapshotDone(final HSnapshotDescription request)
+    throws IOException;
 }

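These six methods give HBaseAdmin an asynchronous snapshot protocol over the existing writable RPC: snapshot() submits the request and returns the maximum time the client should wait, after which isSnapshotDone() is polled. A hedged client-side sketch (backoff and error handling simplified; "master" is an HMasterInterface proxy obtained elsewhere, "snapshot" an HSnapshotDescription):

    long maxWaitMillis = master.snapshot(snapshot);
    long deadline = System.currentTimeMillis() + maxWaitMillis;
    while (!master.isSnapshotDone(snapshot)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Snapshot was not completed within "
            + maxWaitMillis + " ms");
      }
      Thread.sleep(500); // a real client would use configurable backoff
    }
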
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Mon Mar  4 11:24:50 2013
@@ -44,6 +44,7 @@ import javax.management.ObjectName;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.ClusterStatus;
@@ -92,13 +93,18 @@ import org.apache.hadoop.hbase.master.ha
 import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.InfoServer;
@@ -225,6 +231,9 @@ Server {
   private long masterStartTime;
   private long masterActiveTime;
 
+  // monitor for snapshot of hbase tables
+  private SnapshotManager snapshotManager;
+
   /**
    * MX Bean for MasterInfo
    */
@@ -406,6 +415,7 @@ Server {
       if (this.serverManager != null) this.serverManager.stop();
       if (this.assignmentManager != null) this.assignmentManager.stop();
       if (this.fileSystemManager != null) this.fileSystemManager.stop();
+      if (this.snapshotManager != null) this.snapshotManager.stop("server shutting down.");
       this.zooKeeper.close();
     }
     LOG.info("HMaster main thread exiting");
@@ -467,6 +477,9 @@ Server {
         ", sessionid=0x" +
         Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
         ", cluster-up flag was=" + wasUp);
+
+    // create the snapshot manager
+    this.snapshotManager = new SnapshotManager(this);
   }
 
   // Check if we should stop every second.
@@ -1989,4 +2002,125 @@ Server {
     String healthScriptLocation = this.conf.get(HConstants.HEALTH_SCRIPT_LOC);
     return org.apache.commons.lang.StringUtils.isNotBlank(healthScriptLocation);
   }
+
+  /**
+   * Exposed for TESTING!
+   * @return the underlying snapshot manager
+   */
+  public SnapshotManager getSnapshotManagerForTesting() {
+    return this.snapshotManager;
+  }
+
+  /**
+   * Triggers an asynchronous attempt to take a snapshot.
+   * {@inheritDoc}
+   */
+  @Override
+  public long snapshot(final HSnapshotDescription request) throws IOException {
+    LOG.debug("Submitting snapshot request for:" +
+        SnapshotDescriptionUtils.toString(request.getProto()));
+    try {
+      this.snapshotManager.checkSnapshotSupport();
+    } catch (UnsupportedOperationException e) {
+      throw new IOException(e);
+    }
+
+    // get the snapshot information
+    SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(request.getProto(),
+      this.conf);
+
+    snapshotManager.takeSnapshot(snapshot);
+
+    // send back the max amount of time the client should wait for the snapshot to complete
+    long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(conf, snapshot.getType(),
+      SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
+    return waitTime;
+  }
+
+  /**
+   * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
+   */
+  @Override
+  public List<HSnapshotDescription> getCompletedSnapshots() throws IOException {
+    List<HSnapshotDescription> availableSnapshots = new ArrayList<HSnapshotDescription>();
+    List<SnapshotDescription> snapshots = snapshotManager.getCompletedSnapshots();
+
+    // convert to writables
+    for (SnapshotDescription snapshot: snapshots) {
+      availableSnapshots.add(new HSnapshotDescription(snapshot));
+    }
+
+    return availableSnapshots;
+  }
+
+  /**
+   * Execute Delete Snapshot operation.
+   * @throws IOException wrapping a SnapshotDoesNotExistException if the specified snapshot does
+   * not exist.
+   */
+  @Override
+  public void deleteSnapshot(final HSnapshotDescription request) throws IOException {
+    try {
+      this.snapshotManager.checkSnapshotSupport();
+    } catch (UnsupportedOperationException e) {
+      throw new IOException(e);
+    }
+
+    snapshotManager.deleteSnapshot(request.getProto());
+  }
+
+  /**
+   * Checks if the specified snapshot is done.
+   * @return true if the snapshot is complete on the file system and ready to use,
+   * false if the snapshot is still in the process of completing
+   * @throws IOException wrapping an UnknownSnapshotException if the snapshot is unknown, or
+   * a wrapped HBaseSnapshotException carrying the failure reason.
+   */
+  @Override
+  public boolean isSnapshotDone(final HSnapshotDescription request) throws IOException {
+    LOG.debug("Checking to see if snapshot from request:" +
+      SnapshotDescriptionUtils.toString(request.getProto()) + " is done");
+    return snapshotManager.isSnapshotDone(request.getProto());
+  }
+
+  /**
+   * Execute Restore/Clone snapshot operation.
+   *
+   * <p>If the specified table exists a "Restore" is executed, replacing the table
+   * schema and directory data with the content of the snapshot.
+   * The table must be disabled, or an UnsupportedOperationException will be thrown.
+   *
+   * <p>If the table doesn't exist a "Clone" is executed, a new table is created
+   * using the schema at the time of the snapshot, and the content of the snapshot.
+   *
+   * <p>The restore/clone operation does not require copying HFiles. Since HFiles
+   * are immutable the table can point to and use the same files as the original one.
+   */
+  @Override
+  public void restoreSnapshot(final HSnapshotDescription request) throws IOException {
+    try {
+      this.snapshotManager.checkSnapshotSupport();
+    } catch (UnsupportedOperationException e) {
+      throw new IOException(e);
+    }
+
+    snapshotManager.restoreSnapshot(request.getProto());
+  }
+
+  /**
+   * Returns the status of the requested snapshot restore/clone operation.
+   * This method is not exposed to the user; it is used internally by HBaseAdmin
+   * to verify if the restore is completed.
+   *
+   * No exception is thrown if the restore is not running; the result will be "done".
+   *
+   * @return <tt>true</tt> if the restore/clone operation is completed.
+   * @throws RestoreSnapshotException if the operation failed.
+   */
+  @Override
+  public boolean isRestoreSnapshotDone(final HSnapshotDescription request) throws IOException {
+    return !snapshotManager.isRestoringTable(request.getProto());
+  }
 }

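restoreSnapshot() has the same asynchronous shape: the call returns once the request has been submitted, and HBaseAdmin polls isRestoreSnapshotDone() until no restore is running for that snapshot. A minimal sketch under the same assumptions as the snapshot example above:

    master.restoreSnapshot(snapshot);
    while (!master.isRestoreSnapshotDone(snapshot)) {
      Thread.sleep(500); // returns true once the restore/clone completes
    }
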
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java Mon Mar  4 11:24:50 2013
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.coprocessor.*;
 import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 
 import java.io.IOException;
 
@@ -629,4 +630,146 @@ public class MasterCoprocessorHost
       }
     }
   }
+
+  public void preSnapshot(final SnapshotDescription snapshot,
+      final HTableDescriptor hTableDescriptor) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).preSnapshot(ctx, snapshot, hTableDescriptor);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
+
+  public void postSnapshot(final SnapshotDescription snapshot,
+      final HTableDescriptor hTableDescriptor) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).postSnapshot(ctx, snapshot, hTableDescriptor);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
+
+  public void preCloneSnapshot(final SnapshotDescription snapshot,
+      final HTableDescriptor hTableDescriptor) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).preCloneSnapshot(ctx, snapshot, hTableDescriptor);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
+
+  public void postCloneSnapshot(final SnapshotDescription snapshot,
+      final HTableDescriptor hTableDescriptor) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).postCloneSnapshot(ctx, snapshot, hTableDescriptor);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
+
+  public void preRestoreSnapshot(final SnapshotDescription snapshot,
+      final HTableDescriptor hTableDescriptor) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).preRestoreSnapshot(ctx, snapshot, hTableDescriptor);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
+
+  public void postRestoreSnapshot(final SnapshotDescription snapshot,
+      final HTableDescriptor hTableDescriptor) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).postRestoreSnapshot(ctx, snapshot, hTableDescriptor);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
+
+  public void preDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).preDeleteSnapshot(ctx, snapshot);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
+
+  public void postDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
+    ObserverContext<MasterCoprocessorEnvironment> ctx = null;
+    for (MasterEnvironment env: coprocessors) {
+      if (env.getInstance() instanceof MasterObserver) {
+        ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
+          ((MasterObserver)env.getInstance()).postDeleteSnapshot(ctx, snapshot);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env, e);
+        }
+        if (ctx.shouldComplete()) {
+          break;
+        }
+      }
+    }
+  }
 }

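Each hook above follows the same template: iterate the loaded coprocessors, delegate to any that implement MasterObserver, route failures through handleCoprocessorThrowable(), and stop early when shouldComplete() is set. From the coprocessor author's side, a hedged sketch of an observer that vetoes snapshots of one table (assumes BaseMasterObserver supplies no-op defaults for the new hooks, as it does for the existing ones; table name is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

    public class NoSnapshotObserver extends BaseMasterObserver {
      @Override
      public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
          SnapshotDescription snapshot, HTableDescriptor hTableDescriptor)
          throws IOException {
        // Throwing here aborts the snapshot before it starts; the host's
        // handleCoprocessorThrowable() propagates the IOException.
        if ("forbidden_table".equals(hTableDescriptor.getNameAsString())) {
          throw new IOException("snapshots of forbidden_table are not allowed");
        }
      }
    }
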
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java Mon Mar  4 11:24:50 2013
@@ -76,6 +76,8 @@ public class MasterFileSystem {
   private final Path oldLogDir;
   // root hbase directory on the FS
   private final Path rootdir;
+  // hbase temp directory used for table construction and deletion
+  private final Path tempdir;
   // create the split log lock
   final Lock splitLogLock = new ReentrantLock();
   final boolean distributedLogSplitting;
@@ -94,6 +96,7 @@ public class MasterFileSystem {
     // default localfs.  Presumption is that rootdir is fully-qualified before
     // we get to here with appropriate fs scheme.
     this.rootdir = FSUtils.getRootDir(conf);
+    this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
     // Cover both bases, the old way of setting default fs and the new.
     // We're supposed to run on 0.20 and 0.21 anyways.
     this.fs = this.rootdir.getFileSystem(conf);
@@ -130,6 +133,9 @@ public class MasterFileSystem {
     // check if the root directory exists
     checkRootDir(this.rootdir, conf, this.fs);
 
+    // check if temp directory exists and clean it
+    checkTempDir(this.tempdir, conf, this.fs);
+
     Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);
 
     // Make sure the region servers can archive their old logs
@@ -178,6 +184,13 @@ public class MasterFileSystem {
   }
 
   /**
+   * @return HBase temp dir.
+   */
+  public Path getTempDir() {
+    return this.tempdir;
+  }
+
+  /**
    * @return The unique identifier generated for this cluster
    */
   public String getClusterId() {
@@ -385,6 +398,32 @@ public class MasterFileSystem {
     }
   }
 
+  /**
+   * Make sure the hbase temp directory exists and is empty.
+   * NOTE: this method is executed only once, just after the master becomes the active one.
+   */
+  private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
+      throws IOException {
+    // If the temp directory exists, clear the content (left over, from the previous run)
+    if (fs.exists(tmpdir)) {
+      // Archive any tables found in temp, possibly left over from a failed
+      // deletion; anything missed here will be handled by the cleaner.
+      for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
+        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
+          HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
+        }
+      }
+      if (!fs.delete(tmpdir, true)) {
+        throw new IOException("Unable to clean the temp directory: " + tmpdir);
+      }
+    }
+
+    // Create the temp directory
+    if (!fs.mkdirs(tmpdir)) {
+      throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
+    }
+  }
+
   private static void bootstrap(final Path rd, final Configuration c)
   throws IOException {
     LOG.info("BOOTSTRAP: creating ROOT and first META regions");
@@ -451,6 +490,37 @@ public class MasterFileSystem {
     fs.delete(new Path(rootdir, Bytes.toString(tableName)), true);
   }
 
+  /**
+   * Move the specified file/directory to the hbase temp directory.
+   * @param path The path of the file/directory to move
+   * @return The temp location of the file/directory moved
+   * @throws IOException in case of file-system failure
+   */
+  public Path moveToTemp(final Path path) throws IOException {
+    Path tempPath = new Path(this.tempdir, path.getName());
+
+    // Ensure temp exists
+    if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
+      throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+    }
+
+    if (!fs.rename(path, tempPath)) {
+      throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
+    }
+
+    return tempPath;
+  }
+
+  /**
+   * Move the specified table to the hbase temp directory
+   * @param tableName Table name to move
+   * @return The temp location of the table moved
+   * @throws IOException in case of file-system failure
+   */
+  public Path moveTableToTemp(byte[] tableName) throws IOException {
+    return moveToTemp(HTableDescriptor.getTableDir(this.rootdir, tableName));
+  }
+
   public void updateRegionInfo(HRegionInfo region) {
     // TODO implement this.  i think this is currently broken in trunk i don't
     //      see this getting updated.

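The temp-directory plumbing gives table deletion a two-phase shape: moveTableToTemp() renames the table out of the root directory in one cheap filesystem operation, after which the data can be archived or deleted without racing clients, and checkTempDir() sweeps up anything a crashed master left behind. A hedged sketch of a caller (names other than the MasterFileSystem methods are assumptions):

    // "masterFs" is the MasterFileSystem; "tableName" the table to drop.
    Path tempTableDir = masterFs.moveTableToTemp(tableName);
    // The table is now invisible under the root dir. Archiving the regions
    // under tempTableDir can proceed at leisure; if the master dies here,
    // checkTempDir() archives and clears the leftovers on the next active
    // master's startup.
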
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * Watches the snapshot currently being processed.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface SnapshotSentinel {
+
+  /**
+   * Check to see if the snapshot is finished, where finished may be success or failure.
+   * @return <tt>false</tt> if the snapshot is still in progress, <tt>true</tt> if the snapshot has
+   *         finished
+   */
+  public boolean isFinished();
+
+  /**
+   * Actively cancel a running snapshot.
+   * @param why Reason for cancellation.
+   */
+  public void cancel(String why);
+
+  /**
+   * @return the description of the snapshot being run
+   */
+  public SnapshotDescription getSnapshot();
+
+  /**
+   * Get the exception that caused the snapshot to fail, if the snapshot has failed.
+   * @return {@link ForeignException} that caused the snapshot to fail, or <tt>null</tt> if the
+   *  snapshot is still in progress or has succeeded
+   */
+  public ForeignException getExceptionIfFailed();
+
+}

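A SnapshotSentinel is the master's handle on one running snapshot. A hedged sketch of how a caller might wait on one (timeout handling elided; "sentinel" is assumed to come from the SnapshotManager):

    while (!sentinel.isFinished()) {
      Thread.sleep(100);
    }
    ForeignException failure = sentinel.getExceptionIfFailed();
    if (failure != null) {
      // null means the snapshot succeeded; non-null carries the cause.
      throw new IOException("Snapshot failed", failure);
    }
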
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java Mon Mar  4 11:24:50 2013
@@ -82,7 +82,10 @@ public abstract class CleanerChore<T ext
     if (logCleaners != null) {
       for (String className : logCleaners) {
         T logCleaner = newFileCleaner(className, conf);
-        if (logCleaner != null) this.cleanersChain.add(logCleaner);
+        if (logCleaner != null) {
+          LOG.debug("initialize cleaner=" + className);
+          this.cleanersChain.add(logCleaner);
+        }
       }
     }
   }

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java Mon Mar  4 11:24:50 2013
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 /**
  * This Chore, every time it runs, will clear the HFiles in the hfile archive
@@ -46,6 +47,9 @@ public class HFileCleaner extends Cleane
 
   @Override
   protected boolean validate(Path file) {
+    if (HFileLink.isBackReferencesDir(file) || HFileLink.isBackReferencesDir(file.getParent())) {
+      return true;
+    }
     return StoreFile.validateStoreFileName(file.getName());
   }
 }