Posted to commits@hbase.apache.org by mb...@apache.org on 2014/05/07 23:28:13 UTC

svn commit: r1593139 [3/6] - in /hbase/trunk: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ hbase-protocol/src/main/protobuf/ hbase-server/src/main/java/org/apache/hadoop/hbase/client/ hbase-server/src/main/java/org/apache/ha...

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/MapReduce.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/MapReduce.proto?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/MapReduce.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/MapReduce.proto Wed May  7 21:28:12 2014
@@ -30,6 +30,7 @@ message ScanMetrics {
 }
 
 message TableSnapshotRegionSplit {
-  optional RegionSpecifier region = 1;
   repeated string locations = 2;
+  optional TableSchema table = 3;
+  optional RegionInfo region = 4;
 }
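
The split now embeds the full table schema and region info (the old field 1
RegionSpecifier is dropped; the fresh field numbers 3 and 4 avoid clashing
with the retired field), so a record reader can rebuild the HTableDescriptor
and HRegionInfo without re-reading the snapshot directory. A minimal sketch of
assembling the new message, assuming an HTableDescriptor htd and an
HRegionInfo regionInfo are in scope:

  MapReduceProtos.TableSnapshotRegionSplit splitPb =
      MapReduceProtos.TableSnapshotRegionSplit.newBuilder()
        .setTable(htd.convert())                     // full table schema (field 3)
        .setRegion(HRegionInfo.convert(regionInfo))  // full region info (field 4)
        .addLocations("host1.example.com")           // preferred block locations
        .build();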

Added: hbase/trunk/hbase-protocol/src/main/protobuf/Snapshot.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Snapshot.proto?rev=1593139&view=auto
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Snapshot.proto (added)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Snapshot.proto Wed May  7 21:28:12 2014
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "SnapshotProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "FS.proto";
+import "HBase.proto";
+
+message SnapshotFileInfo {
+  enum Type {
+    HFILE = 1;
+    WAL = 2;
+  }
+
+  required Type type = 1;
+
+  optional string hfile = 3;
+
+  optional string wal_server = 4;
+  optional string wal_name = 5;
+}
+
+message SnapshotRegionManifest {
+  optional int32 version = 1;
+
+  required RegionInfo region_info = 2;
+  repeated FamilyFiles family_files = 3;
+
+  message StoreFile {
+    required string name = 1;
+    optional Reference reference = 2;
+
+    // TODO: Add checksums or other fields to verify the file
+    optional uint64 file_size = 3;
+  }
+
+  message FamilyFiles {
+    required bytes family_name = 1;
+    repeated StoreFile store_files = 2;
+  }
+}
+
+message SnapshotDataManifest {
+  required TableSchema table_schema = 1;
+  repeated SnapshotRegionManifest region_manifests = 2;
+}
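
These messages define the new manifest layout: a snapshot is described by one
SnapshotRegionManifest per region, rolled up with the table schema into a
single SnapshotDataManifest, rather than a directory tree of per-file
references. A hedged sketch of building one region entry through the generated
classes (hri, the family, and the file name are illustrative; ByteString is
com.google.protobuf.ByteString):

  SnapshotProtos.SnapshotRegionManifest regionManifest =
      SnapshotProtos.SnapshotRegionManifest.newBuilder()
        .setRegionInfo(HRegionInfo.convert(hri))
        .addFamilyFiles(SnapshotProtos.SnapshotRegionManifest.FamilyFiles.newBuilder()
          .setFamilyName(ByteString.copyFromUtf8("cf"))
          .addStoreFiles(SnapshotProtos.SnapshotRegionManifest.StoreFile.newBuilder()
            .setName("8e03b855f3bc42a6a19f4288691d9d4e")  // hfile name in the family dir
            .setFileSize(1024L)))
        .build();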

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java Wed May  7 21:28:12 2014
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Set;
+import java.util.List;
 import java.util.UUID;
 
 import org.apache.commons.logging.Log;
@@ -32,16 +32,16 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * A Scanner which performs a scan over snapshot files. Using this class requires copying the
@@ -99,8 +99,7 @@ public class TableSnapshotScanner extend
    */
   public TableSnapshotScanner(Configuration conf, Path restoreDir,
       String snapshotName, Scan scan) throws IOException {
-    this(conf, new Path(conf.get(HConstants.HBASE_DIR)),
-      restoreDir, snapshotName, scan);
+    this(conf, FSUtils.getRootDir(conf), restoreDir, snapshotName, scan);
   }
 
   /**
@@ -128,22 +127,21 @@ public class TableSnapshotScanner extend
 
   private void init() throws IOException {
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
 
-    //load table descriptor
-    htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+    // load table descriptor
+    htd = manifest.getTableDescriptor();
 
-    Set<String> snapshotRegionNames
-      = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
-    if (snapshotRegionNames == null) {
+    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
+    if (regionManifests == null) {
       throw new IllegalArgumentException("Snapshot seems empty");
     }
 
-    regions = new ArrayList<HRegionInfo>(snapshotRegionNames.size());
-    for (String regionName : snapshotRegionNames) {
+    regions = new ArrayList<HRegionInfo>(regionManifests.size());
+    for (SnapshotRegionManifest regionManifest : regionManifests) {
       // load region descriptor
-      Path regionDir = new Path(snapshotDir, regionName);
-      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs,
-          regionDir);
+      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
 
       if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
           hri.getStartKey(), hri.getEndKey())) {
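
The scanner now resolves the HBase root directory via FSUtils.getRootDir(conf)
and reads region info straight from the SnapshotRegionManifest list instead of
listing per-region directories. Client usage is unchanged; a minimal sketch,
assuming a snapshot named "snap1" exists and the restore directory is a
writable path outside the HBase root:

  Configuration conf = HBaseConfiguration.create();
  Path restoreDir = new Path("/tmp/snapshot-restore");  // illustrative path
  TableSnapshotScanner scanner =
      new TableSnapshotScanner(conf, restoreDir, "snap1", new Scan());
  try {
    for (Result result : scanner) {
      // ... process each row ...
    }
  } finally {
    scanner.close();
  }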

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java Wed May  7 21:28:12 2014
@@ -190,7 +190,7 @@ public class Reference {
     }
   }
 
-  FSProtos.Reference convert() {
+  public FSProtos.Reference convert() {
     FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder();
     builder.setRange(isTopFileRegion(getFileRegion())?
       FSProtos.Reference.Range.TOP: FSProtos.Reference.Range.BOTTOM);
@@ -198,7 +198,7 @@ public class Reference {
     return builder.build();
   }
 
-  static Reference convert(final FSProtos.Reference r) {
+  public static Reference convert(final FSProtos.Reference r) {
     Reference result = new Reference();
     result.splitkey = r.getSplitkey().toByteArray();
     result.region = r.getRange() == FSProtos.Reference.Range.TOP? Range.top: Range.bottom;
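
Widening convert() to public lets the snapshot manifest code serialize a
store-file Reference into the optional Reference field of
SnapshotRegionManifest.StoreFile (see Snapshot.proto above). A sketch of the
round trip, assuming the existing createTopReference factory:

  Reference ref = Reference.createTopReference(Bytes.toBytes("splitrow"));
  FSProtos.Reference proto = ref.convert();        // Reference -> protobuf
  Reference roundTrip = Reference.convert(proto);  // protobuf -> Reference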

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java Wed May  7 21:28:12 2014
@@ -25,10 +25,8 @@ import java.io.IOException;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Set;
 import java.util.UUID;
 
-import com.google.protobuf.HBaseZeroCopyByteString;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -37,7 +35,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution.HostAndWeight;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -49,18 +46,15 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.client.TableSnapshotScanner;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -82,7 +76,7 @@ import com.google.common.annotations.Vis
  * while there are jobs reading from snapshot files.
  * <p>
  * Usage is similar to TableInputFormat, and
- * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, 
+ * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
  *   boolean, Path)}
  * can be used to configure the job.
  * <pre>{@code
@@ -101,12 +95,12 @@ import com.google.common.annotations.Vis
  * <p>
  * HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from
  * snapshot files and data files. HBase also enforces security because all the requests are handled
- * by the server layer, and the user cannot read from the data files directly. 
- * To read from snapshot files directly from the file system, the user who is running the MR job 
- * must have sufficient permissions to access snapshot and reference files. 
- * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase 
- * user or the user must have group or other privileges in the filesystem (See HBASE-8369). 
- * Note that granting other users access to read from snapshot/data files will completely circumvent 
+ * by the server layer, and the user cannot read from the data files directly.
+ * To read from snapshot files directly from the file system, the user who is running the MR job
+ * must have sufficient permissions to access snapshot and reference files.
+ * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase
+ * user or the user must have group or other privileges in the filesystem (See HBASE-8369).
+ * Note that granting other users access to read from snapshot/data files will completely circumvent
  * the access control enforced by HBase.
  * @see TableSnapshotScanner
  */
@@ -119,22 +113,25 @@ public class TableSnapshotInputFormat ex
   private static final Log LOG = LogFactory.getLog(TableSnapshotInputFormat.class);
 
   /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution)} */
-  private static final String LOCALITY_CUTOFF_MULTIPLIER = 
+  private static final String LOCALITY_CUTOFF_MULTIPLIER =
       "hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
   private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
 
   private static final String SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
   private static final String TABLE_DIR_KEY = "hbase.TableSnapshotInputFormat.table.dir";
 
-  public static class TableSnapshotRegionSplit extends InputSplit implements Writable {
-    private String regionName;
+  @VisibleForTesting
+  static class TableSnapshotRegionSplit extends InputSplit implements Writable {
+    private HTableDescriptor htd;
+    private HRegionInfo regionInfo;
     private String[] locations;
 
     // constructor for mapreduce framework / Writable
     public TableSnapshotRegionSplit() { }
 
-    TableSnapshotRegionSplit(String regionName, List<String> locations) {
-      this.regionName = regionName;
+    TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo, List<String> locations) {
+      this.htd = htd;
+      this.regionInfo = regionInfo;
       if (locations == null || locations.isEmpty()) {
         this.locations = new String[0];
       } else {
@@ -158,9 +155,8 @@ public class TableSnapshotInputFormat ex
     public void write(DataOutput out) throws IOException {
     MapReduceProtos.TableSnapshotRegionSplit.Builder builder =
       MapReduceProtos.TableSnapshotRegionSplit.newBuilder()
-        .setRegion(RegionSpecifier.newBuilder()
-          .setType(RegionSpecifierType.ENCODED_REGION_NAME)
-          .setValue(HBaseZeroCopyByteString.wrap(Bytes.toBytes(regionName))).build());
+        .setTable(htd.convert())
+        .setRegion(HRegionInfo.convert(regionInfo));
 
       for (String location : locations) {
         builder.addLocations(location);
@@ -180,16 +176,17 @@ public class TableSnapshotInputFormat ex
       int len = in.readInt();
       byte[] buf = new byte[len];
       in.readFully(buf);
-      MapReduceProtos.TableSnapshotRegionSplit split = 
+      MapReduceProtos.TableSnapshotRegionSplit split =
           MapReduceProtos.TableSnapshotRegionSplit.PARSER.parseFrom(buf);
-      this.regionName = Bytes.toString(split.getRegion().getValue().toByteArray());
+      this.htd = HTableDescriptor.convert(split.getTable());
+      this.regionInfo = HRegionInfo.convert(split.getRegion());
       List<String> locationsList = split.getLocationsList();
       this.locations = locationsList.toArray(new String[locationsList.size()]);
     }
   }
 
   @VisibleForTesting
-  static class TableSnapshotRegionRecordReader extends 
+  static class TableSnapshotRegionRecordReader extends
     RecordReader<ImmutableBytesWritable, Result> {
     private TableSnapshotRegionSplit split;
     private Scan scan;
@@ -205,23 +202,13 @@ public class TableSnapshotInputFormat ex
 
       Configuration conf = context.getConfiguration();
       this.split = (TableSnapshotRegionSplit) split;
-      String regionName = this.split.regionName;
-      String snapshotName = getSnapshotName(conf);
-      Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
-      FileSystem fs = rootDir.getFileSystem(conf);
+      HTableDescriptor htd = this.split.htd;
+      HRegionInfo hri = this.split.regionInfo;
+      FileSystem fs = FSUtils.getCurrentFileSystem(conf);
 
       Path tmpRootDir = new Path(conf.get(TABLE_DIR_KEY)); // This is the user specified root
       // directory where snapshot was restored
 
-      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
-
-      //load table descriptor
-      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
-
-      //load region descriptor
-      Path regionDir = new Path(snapshotDir, regionName);
-      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-
       // create scan
       String scanStr = conf.get(TableInputFormat.SCAN);
       if (scanStr == null) {
@@ -294,31 +281,28 @@ public class TableSnapshotInputFormat ex
     Configuration conf = job.getConfiguration();
     String snapshotName = getSnapshotName(conf);
 
-    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
+    Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
 
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
-
-    Set<String> snapshotRegionNames
-      = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
-    if (snapshotRegionNames == null) {
+    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
+    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
+    if (regionManifests == null) {
       throw new IllegalArgumentException("Snapshot seems empty");
     }
 
     // load table descriptor
-    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs,
-        snapshotDir);
+    HTableDescriptor htd = manifest.getTableDescriptor();
 
     Scan scan = TableMapReduceUtil.convertStringToScan(conf
       .get(TableInputFormat.SCAN));
     Path tableDir = new Path(conf.get(TABLE_DIR_KEY));
 
     List<InputSplit> splits = new ArrayList<InputSplit>();
-    for (String regionName : snapshotRegionNames) {
+    for (SnapshotRegionManifest regionManifest : regionManifests) {
       // load region descriptor
-      Path regionDir = new Path(snapshotDir, regionName);
-      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs,
-          regionDir);
+      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
 
       if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
           hri.getStartKey(), hri.getEndKey())) {
@@ -329,7 +313,7 @@ public class TableSnapshotInputFormat ex
 
         int len = Math.min(3, hosts.size());
         hosts = hosts.subList(0, len);
-        splits.add(new TableSnapshotRegionSplit(regionName, hosts));
+        splits.add(new TableSnapshotRegionSplit(htd, hri, hosts));
       }
     }
 
@@ -341,7 +325,7 @@ public class TableSnapshotInputFormat ex
    * weights into account, thus will treat every location passed from the input split as equal. We
    * do not want to blindly pass all the locations, since we are creating one split per region, and
   * the region's blocks are all distributed throughout the cluster unless favored node assignment
-   * is used. In the expected stable case, only one location will contain most of the blocks as 
+   * is used. In the expected stable case, only one location will contain most of the blocks as
    * local.
    * On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. Here
    * we are doing a simple heuristic, where we will pass all hosts which have at least 80%
@@ -391,7 +375,7 @@ public class TableSnapshotInputFormat ex
     Configuration conf = job.getConfiguration();
     conf.set(SNAPSHOT_NAME_KEY, snapshotName);
 
-    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
+    Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
 
     restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
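
With the schema and region info carried inside each serialized split, the
record reader's initialize() no longer touches the snapshot directory at all.
Job setup is unchanged; a minimal sketch following the javadoc above (the
snapshot name, restore path, and MyMapper are illustrative):

  Job job = Job.getInstance(conf, "scan-over-snapshot");
  TableMapReduceUtil.initTableSnapshotMapperJob("snap1", new Scan(),
      MyMapper.class,                  // a TableMapper subclass
      Text.class, Text.class,          // the mapper's output key/value types
      job, true,                       // true = add dependency jars
      new Path("/tmp/snapshot-restore"));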

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java Wed May  7 21:28:12 2014
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.snapshot.
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 
 import com.google.common.base.Preconditions;
 
@@ -107,8 +108,9 @@ public class CloneSnapshotHandler extend
     try {
       // 1. Execute the on-disk Clone
       Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
+      SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
       RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs,
-          snapshot, snapshotDir, hTableDescriptor, tableRootDir, monitor, status);
+          manifest, hTableDescriptor, tableRootDir, monitor, status);
       metaChanges = restoreHelper.restoreHdfsRegions();
 
       // Clone operation should not have stuff to restore or remove
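
The restore helper now takes an opened SnapshotManifest rather than the raw
snapshot directory, so clone and restore share one manifest-reading path. The
pattern, as used here and in RestoreSnapshotHandler below:

  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
  RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs,
      manifest, hTableDescriptor, tableRootDir, monitor, status);
  RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
      restoreHelper.restoreHdfsRegions();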

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java Wed May  7 21:28:12 2014
@@ -24,23 +24,18 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;
 import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.MetricsMaster;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.TableInfoCopyTask;
-import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.zookeeper.KeeperException;
 
@@ -64,7 +59,7 @@ public class DisabledTableSnapshotHandle
     super(snapshot, masterServices);
 
     // setup the timer
-    timeoutInjector = TakeSnapshotUtils.getMasterTimerAndBindToMonitor(snapshot, conf, monitor);
+    timeoutInjector = getMasterTimerAndBindToMonitor(snapshot, conf, monitor);
   }
 
   @Override
@@ -80,8 +75,6 @@ public class DisabledTableSnapshotHandle
     try {
       timeoutInjector.start();
 
-      Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
-
       // 1. get all the regions hosting this table.
 
       // extract each pair to separate lists
@@ -95,34 +88,38 @@ public class DisabledTableSnapshotHandle
           + ClientSnapshotDescriptionUtils.toString(snapshot);
       LOG.info(msg);
       status.setStatus(msg);
-      for (HRegionInfo regionInfo : regions) {
+      for (HRegionInfo regionInfo: regions) {
         snapshotDisabledRegion(regionInfo);
       }
-
-      // 3. write the table info to disk
-      LOG.info("Starting to copy tableinfo for offline snapshot: " +
-      ClientSnapshotDescriptionUtils.toString(snapshot));
-      TableInfoCopyTask tableInfoCopyTask = new TableInfoCopyTask(this.monitor, snapshot, fs,
-          FSUtils.getRootDir(conf));
-      tableInfoCopyTask.call();
-      monitor.rethrowException();
-      status.setStatus("Finished copying tableinfo for snapshot of table: " +
-          snapshotTable);
     } catch (Exception e) {
       // make sure we capture the exception to propagate back to the client later
       String reason = "Failed snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)
           + " due to exception:" + e.getMessage();
       ForeignException ee = new ForeignException(reason, e);
       monitor.receive(ee);
-      status.abort("Snapshot of table: "+ snapshotTable +
-          " failed because " + e.getMessage());
+      status.abort("Snapshot of table: "+ snapshotTable + " failed because " + e.getMessage());
     } finally {
       LOG.debug("Marking snapshot" + ClientSnapshotDescriptionUtils.toString(snapshot)
           + " as finished.");
 
-      // 6. mark the timer as finished - even if we got an exception, we don't need to time the
+      // 3. mark the timer as finished - even if we got an exception, we don't need to time the
       // operation any further
       timeoutInjector.complete();
     }
   }
+
+
+  /**
+   * Create a snapshot timer for the master which notifies the monitor when an error occurs
+   * @param snapshot snapshot to monitor
+   * @param conf configuration to use when getting the max snapshot life
+   * @param monitor monitor to notify when the snapshot life expires
+   * @return the timer used to signal the start and end of the snapshot
+   */
+  private TimeoutExceptionInjector getMasterTimerAndBindToMonitor(SnapshotDescription snapshot,
+      Configuration conf, ForeignExceptionListener monitor) {
+    long maxTime = SnapshotDescriptionUtils.getMaxMasterTimeout(conf, snapshot.getType(),
+      SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
+    return new TimeoutExceptionInjector(monitor, maxTime);
+  }
 }
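
The timer helper was inlined from the now-removed TakeSnapshotUtils; its
lifecycle around the snapshot work stays the same. A sketch of that lifecycle,
using the calls visible in process() above:

  TimeoutExceptionInjector timeoutInjector =
      new TimeoutExceptionInjector(monitor, maxTime);  // fires an error on expiry
  timeoutInjector.start();
  try {
    // ... snapshot each region of the disabled table ...
  } finally {
    timeoutInjector.complete();  // always stop the timer, even after a failure
  }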

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java Wed May  7 21:28:12 2014
@@ -25,17 +25,14 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.MetricsMaster;
 import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.collect.Lists;
@@ -99,14 +96,11 @@ public class EnabledTableSnapshotHandler
       LOG.info("Done waiting - online snapshot for " + this.snapshot.getName());
 
       // Take the offline regions as disabled
-      Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
       for (Pair<HRegionInfo, ServerName> region : regions) {
         HRegionInfo regionInfo = region.getFirst();
         if (regionInfo.isOffline() && (regionInfo.isSplit() || regionInfo.isSplitParent())) {
-          if (!fs.exists(new Path(snapshotDir, regionInfo.getEncodedName()))) {
-            LOG.info("Take disabled snapshot of offline region=" + regionInfo);
-            snapshotDisabledRegion(regionInfo);
-          }
+          LOG.info("Take disabled snapshot of offline region=" + regionInfo);
+          snapshotDisabledRegion(regionInfo);
         }
       }
     } catch (InterruptedException e) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java Wed May  7 21:28:12 2014
@@ -19,35 +19,28 @@ package org.apache.hadoop.hbase.master.s
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSVisitor;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 
 /**
  * General snapshot verification on the master.
@@ -110,14 +103,16 @@ public final class MasterSnapshotVerifie
    */
   public void verifySnapshot(Path snapshotDir, Set<String> snapshotServers)
       throws CorruptedSnapshotException, IOException {
+    SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), fs,
+                                                      snapshotDir, snapshot);
     // verify snapshot info matches
     verifySnapshotDescription(snapshotDir);
 
     // check that tableinfo is a valid table description
-    verifyTableInfo(snapshotDir);
+    verifyTableInfo(manifest);
 
     // check that each region is valid
-    verifyRegions(snapshotDir);
+    verifyRegions(manifest);
   }
 
   /**
@@ -136,8 +131,16 @@ public final class MasterSnapshotVerifie
    * Check that the table descriptor for the snapshot is a valid table descriptor
    * @param snapshotDir snapshot directory to check
    */
-  private void verifyTableInfo(Path snapshotDir) throws IOException {
-    FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+  private void verifyTableInfo(final SnapshotManifest manifest) throws IOException {
+    HTableDescriptor htd = manifest.getTableDescriptor();
+    if (htd == null) {
+      throw new CorruptedSnapshotException("Missing Table Descriptor", snapshot);
+    }
+
+    if (!htd.getNameAsString().equals(snapshot.getTable())) {
+      throw new CorruptedSnapshotException("Invalid Table Descriptor. Expected "
+        + snapshot.getTable() + " name, got " + htd.getNameAsString(), snapshot);
+    }
   }
 
   /**
@@ -145,34 +148,36 @@ public final class MasterSnapshotVerifie
    * @param snapshotDir snapshot directory to check
    * @throws IOException if we can't reach hbase:meta or read the files from the FS
    */
-  private void verifyRegions(Path snapshotDir) throws IOException {
+  private void verifyRegions(final SnapshotManifest manifest) throws IOException {
     List<HRegionInfo> regions = MetaReader.getTableRegions(this.services.getCatalogTracker(),
         tableName);
 
-    Set<String> snapshotRegions = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
-    if (snapshotRegions == null) {
+    Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
+    if (regionManifests == null) {
       String msg = "Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " looks empty";
       LOG.error(msg);
       throw new CorruptedSnapshotException(msg);
     }
 
     String errorMsg = "";
-    if (snapshotRegions.size() != regions.size()) {
-      errorMsg = "Regions moved during the snapshot '" + 
+    if (regionManifests.size() != regions.size()) {
+      errorMsg = "Regions moved during the snapshot '" +
                    ClientSnapshotDescriptionUtils.toString(snapshot) + "'. expected=" +
-                   regions.size() + " snapshotted=" + snapshotRegions.size() + ".";
+                   regions.size() + " snapshotted=" + regionManifests.size() + ".";
       LOG.error(errorMsg);
     }
 
     for (HRegionInfo region : regions) {
-      if (!snapshotRegions.contains(region.getEncodedName())) {
+      SnapshotRegionManifest regionManifest = regionManifests.get(region.getEncodedName());
+      if (regionManifest == null) {
         // could happen due to a move or split race.
         String mesg = " No snapshot region directory found for region:" + region;
         if (errorMsg.isEmpty()) errorMsg = mesg;
         LOG.error(mesg);
+        continue;
       }
 
-      verifyRegion(fs, snapshotDir, region);
+      verifyRegion(fs, manifest.getSnapshotDir(), region, regionManifest);
     }
     if (!errorMsg.isEmpty()) {
       throw new CorruptedSnapshotException(errorMsg);
@@ -185,65 +190,24 @@ public final class MasterSnapshotVerifie
    * @param snapshotDir snapshot directory to check
    * @param region the region to check
    */
-  private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region)
-      throws IOException {
-    // make sure we have region in the snapshot
-    Path regionDir = new Path(snapshotDir, region.getEncodedName());
-
-    // make sure we have the region info in the snapshot
-    Path regionInfo = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
-    // make sure the file exists
-    if (!fs.exists(regionInfo)) {
-      throw new CorruptedSnapshotException("No region info found for region:" + region, snapshot);
-    }
-
-    HRegionInfo found = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-    if (!region.equals(found)) {
-      throw new CorruptedSnapshotException("Found region info (" + found
-        + ") doesn't match expected region:" + region, snapshot);
-    }
-
-    // make sure we have the expected recovered edits files
-    TakeSnapshotUtils.verifyRecoveredEdits(fs, snapshotDir, found, snapshot);
-
-     // make sure we have all the expected store files
-    SnapshotReferenceUtil.visitRegionStoreFiles(fs, regionDir, new FSVisitor.StoreFileVisitor() {
-      public void storeFile(final String regionNameSuffix, final String family,
-          final String hfileName) throws IOException {
-        verifyStoreFile(snapshotDir, region, family, hfileName);
+  private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region,
+      final SnapshotRegionManifest manifest) throws IOException {
+    HRegionInfo manifestRegionInfo = HRegionInfo.convert(manifest.getRegionInfo());
+    if (!region.equals(manifestRegionInfo)) {
+      String msg = "Manifest region info " + manifestRegionInfo +
+                   " doesn't match expected region:" + region;
+      throw new CorruptedSnapshotException(msg, snapshot);
+    }
+
+    // make sure we have all the expected store files
+    SnapshotReferenceUtil.visitRegionStoreFiles(manifest,
+        new SnapshotReferenceUtil.StoreFileVisitor() {
+      @Override
+      public void storeFile(final HRegionInfo regionInfo, final String family,
+          final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+        SnapshotReferenceUtil.verifyStoreFile(services.getConfiguration(), fs, snapshotDir,
+          snapshot, region, family, storeFile);
       }
     });
   }
-
-  private void verifyStoreFile(final Path snapshotDir, final HRegionInfo regionInfo,
-      final String family, final String fileName) throws IOException {
-    Path refPath = null;
-    if (StoreFileInfo.isReference(fileName)) {
-      // If is a reference file check if the parent file is present in the snapshot
-      Path snapshotHFilePath = new Path(new Path(
-          new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName);
-      refPath = StoreFileInfo.getReferredToFile(snapshotHFilePath);
-      if (!fs.exists(refPath)) {
-        throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot);
-      }
-    }
-
-    Path linkPath;
-    if (refPath != null && HFileLink.isHFileLink(refPath)) {
-      linkPath = new Path(family, refPath.getName());
-    } else if (HFileLink.isHFileLink(fileName)) {
-      linkPath = new Path(family, fileName);
-    } else {
-      linkPath = new Path(family, HFileLink.createHFileLinkName(tableName,
-        regionInfo.getEncodedName(), fileName));
-    }
-
-    // check if the linked file exists (in the archive, or in the table dir)
-    HFileLink link = new HFileLink(services.getConfiguration(), linkPath);
-    if (!link.exists(fs)) {
-      throw new CorruptedSnapshotException("Can't find hfile: " + fileName
-          + " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath()
-          + ") directory for the primary table.", snapshot);
-    }
-  }
 }
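
Store-file checks now walk the manifest through the visitor-based
SnapshotReferenceUtil API instead of listing reference files on disk. A sketch
of enumerating what one region manifest references (the LOG call is
illustrative):

  SnapshotReferenceUtil.visitRegionStoreFiles(regionManifest,
      new SnapshotReferenceUtil.StoreFileVisitor() {
    @Override
    public void storeFile(final HRegionInfo regionInfo, final String family,
        final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
      LOG.debug("region=" + regionInfo.getEncodedName() + " family=" + family +
          " hfile=" + storeFile.getName());
    }
  });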

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java Wed May  7 21:28:12 2014
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.snapshot.
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 
 /**
  * Handler to Restore a snapshot.
@@ -120,9 +121,11 @@ public class RestoreSnapshotHandler exte
       // 2. Execute the on-disk Restore
       LOG.debug("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
       Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
+      SnapshotManifest manifest = SnapshotManifest.open(masterServices.getConfiguration(), fs,
+                                                        snapshotDir, snapshot);
       RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
-          masterServices.getConfiguration(), fs,
-          snapshot, snapshotDir, hTableDescriptor, rootDir, monitor, status);
+          masterServices.getConfiguration(), fs, manifest,
+          this.hTableDescriptor, rootDir, monitor, status);
       RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
 
       // 3. Forces all the RegionStates to be offline

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java Wed May  7 21:28:12 2014
@@ -174,19 +174,24 @@ public class SnapshotFileCache implement
   // is an illegal access to the cache. Really we could do a mutex-guarded pointer swap on the
   // cache, but that seems overkill at the moment and isn't necessarily a bottleneck.
   public synchronized boolean contains(String fileName) throws IOException {
-    if (this.cache.contains(fileName)) return true;
-
-    refreshCache();
-
-    // then check again
-    return this.cache.contains(fileName);
+    boolean hasFile = this.cache.contains(fileName);
+    if (!hasFile) {
+      refreshCache();
+      // then check again
+      hasFile = this.cache.contains(fileName);
+    }
+    return hasFile;
   }
 
   private synchronized void refreshCache() throws IOException {
-    // get the status of the snapshots directory and <snapshot dir>/.tmp
-    FileStatus dirStatus, tempStatus;
+    long lastTimestamp = Long.MAX_VALUE;
+    boolean hasChanges = false;
+
+    // get the status of the snapshots directory and check if it has changes
     try {
-      dirStatus = fs.getFileStatus(snapshotDir);
+      FileStatus dirStatus = fs.getFileStatus(snapshotDir);
+      lastTimestamp = dirStatus.getModificationTime();
+      hasChanges |= (lastTimestamp >= lastModifiedTime);
     } catch (FileNotFoundException e) {
       if (this.cache.size() > 0) {
         LOG.error("Snapshot directory: " + snapshotDir + " doesn't exist");
@@ -194,16 +199,28 @@ public class SnapshotFileCache implement
       return;
     }
 
+    // get the status of the snapshots temporary directory and check if it has changes
+    // The top-level directory timestamp is not updated, so we have to check the inner-level.
     try {
       Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
-      tempStatus = fs.getFileStatus(snapshotTmpDir);
+      FileStatus tempDirStatus = fs.getFileStatus(snapshotTmpDir);
+      lastTimestamp = Math.min(lastTimestamp, tempDirStatus.getModificationTime());
+      hasChanges |= (lastTimestamp >= lastModifiedTime);
+      if (!hasChanges) {
+        FileStatus[] tmpSnapshots = FSUtils.listStatus(fs, snapshotDir);
+        if (tmpSnapshots != null) {
+          for (FileStatus dirStatus: tmpSnapshots) {
+            lastTimestamp = Math.min(lastTimestamp, dirStatus.getModificationTime());
+          }
+          hasChanges |= (lastTimestamp >= lastModifiedTime);
+        }
+      }
     } catch (FileNotFoundException e) {
-      tempStatus = dirStatus;
+      // Nothing to do if the tmp dir is empty
     }
 
     // if the snapshot directory wasn't modified since we last check, we are done
-    if (dirStatus.getModificationTime() <= lastModifiedTime &&
-        tempStatus.getModificationTime() <= lastModifiedTime) {
+    if (!hasChanges) {
       return;
     }
 
@@ -213,8 +230,7 @@ public class SnapshotFileCache implement
     // However, snapshot directories are only created once, so this isn't an issue.
 
     // 1. update the modified time
-    this.lastModifiedTime = Math.min(dirStatus.getModificationTime(),
-                                     tempStatus.getModificationTime());
+    this.lastModifiedTime = lastTimestamp;
 
     // 2.clear the cache
     this.cache.clear();
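
The refresh decision now folds the snapshot directory, its .tmp subdirectory,
and the in-flight snapshot directories under .tmp into a single timestamp
watermark; comparing with >= errs on the side of refreshing, so a real change
is never missed at the cost of an occasional extra refresh. The pattern, in
brief (watchedStatuses is a stand-in for the three stat calls above):

  long lastTimestamp = Long.MAX_VALUE;
  for (FileStatus status : watchedStatuses) {  // snapshotDir, .tmp, .tmp/*
    lastTimestamp = Math.min(lastTimestamp, status.getModificationTime());
  }
  if (lastTimestamp < lastModifiedTime) {
    return;                          // nothing changed since the last refresh
  }
  lastModifiedTime = lastTimestamp;  // new watermark; now re-list and rebuild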

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java Wed May  7 21:28:12 2014
@@ -66,7 +66,7 @@ public class SnapshotHFileCleaner extend
   }
 
   @Override
-  public void setConf(Configuration conf) {
+  public void setConf(final Configuration conf) {
     super.setConf(conf);
     try {
       long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
@@ -77,7 +77,7 @@ public class SnapshotHFileCleaner extend
           "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
             public Collection<String> filesUnderSnapshot(final Path snapshotDir)
                 throws IOException {
-              return SnapshotReferenceUtil.getHFileNames(fs, snapshotDir);
+              return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
             }
           });
     } catch (IOException e) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java Wed May  7 21:28:12 2014
@@ -70,11 +70,11 @@ import org.apache.hadoop.hbase.snapshot.
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
 import org.apache.hadoop.hbase.snapshot.SnapshotExistsException;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.zookeeper.KeeperException;
 
@@ -540,9 +540,12 @@ public class SnapshotManager extends Mas
           + "' doesn't exist, can't take snapshot.", snapshot);
     }
 
-    // set the snapshot version, now that we are ready to take it
-    snapshot = snapshot.toBuilder().setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION)
-        .build();
+    // if not specified, set the snapshot format
+    if (!snapshot.hasVersion()) {
+      snapshot = snapshot.toBuilder()
+          .setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION)
+          .build();
+    }
 
     // call pre coproc hook
     MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
@@ -676,15 +679,16 @@ public class SnapshotManager extends Mas
 
     // read snapshot information
     SnapshotDescription fsSnapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
-    HTableDescriptor snapshotTableDesc =
-        FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+    SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs,
+        snapshotDir, fsSnapshot);
+    HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
     TableName tableName = TableName.valueOf(reqSnapshot.getTable());
 
     // stop tracking "abandoned" handlers
     cleanupSentinels();
 
     // Verify snapshot validity
-    SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, snapshotDir, fsSnapshot);
+    SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
 
     // Execute the restore/clone operation
     if (MetaReader.tableExists(master.getCatalogTracker(), tableName)) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java Wed May  7 21:28:12 2014
@@ -48,14 +48,11 @@ import org.apache.hadoop.hbase.master.Ta
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.CopyRecoveredEditsTask;
-import org.apache.hadoop.hbase.snapshot.ReferenceRegionHFilesTask;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.TableInfoCopyTask;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.zookeeper.KeeperException;
 
@@ -88,6 +85,9 @@ public abstract class TakeSnapshotHandle
   protected final TableLock tableLock;
   protected final MonitoredTask status;
   protected final TableName snapshotTable;
+  protected final SnapshotManifest snapshotManifest;
+
+  protected HTableDescriptor htd;
 
   /**
    * @param snapshot descriptor of the snapshot to take
@@ -107,6 +107,7 @@ public abstract class TakeSnapshotHandle
     this.snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
     this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
     this.monitor = new ForeignExceptionDispatcher(snapshot.getName());
+    this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
 
     this.tableLockManager = master.getTableLockManager();
     this.tableLock = this.tableLockManager.writeLock(
@@ -136,7 +137,7 @@ public abstract class TakeSnapshotHandle
                               // case of exceptions
     boolean success = false;
     try {
-      loadTableDescriptor(); // check that .tableinfo is present
+      this.htd = loadTableDescriptor(); // check that .tableinfo is present
       success = true;
     } finally {
       if (!success) {
@@ -162,8 +163,8 @@ public abstract class TakeSnapshotHandle
       // an external exception that gets captured here.
 
       // write down the snapshot info in the working directory
-      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, this.fs);
-      new TableInfoCopyTask(monitor, snapshot, fs, rootDir).call();
+      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
+      snapshotManifest.addTableDescriptor(this.htd);
       monitor.rethrowException();
 
       List<Pair<HRegionInfo, ServerName>> regionsAndLocations =
@@ -184,16 +185,19 @@ public abstract class TakeSnapshotHandle
         }
       }
 
+      // flush the in-memory state, and write the single manifest
+      status.setStatus("Consolidate snapshot: " + snapshot.getName());
+      snapshotManifest.consolidate();
+
       // verify the snapshot is valid
       status.setStatus("Verifying snapshot: " + snapshot.getName());
       verifier.verifySnapshot(this.workingDir, serverNames);
 
       // complete the snapshot, atomically moving from tmp to .snapshot dir.
       completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
-      status.markComplete("Snapshot " + snapshot.getName() + " of table " + snapshotTable
-          + " completed");
-      LOG.info("Snapshot " + snapshot.getName() + " of table " + snapshotTable
-          + " completed");
+      msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
+      status.markComplete(msg);
+      LOG.info(msg);
       metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
     } catch (Exception e) {
       status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " +
@@ -204,8 +208,7 @@ public abstract class TakeSnapshotHandle
       ForeignException ee = new ForeignException(reason, e);
       monitor.receive(ee);
       // need to mark this completed to close off and allow cleanup to happen.
-      cancel("Failed to take snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot)
-          + "' due to exception");
+      cancel(reason);
     } finally {
       LOG.debug("Launching cleanup of working dir:" + workingDir);
       try {
@@ -262,26 +265,10 @@ public abstract class TakeSnapshotHandle
    */
   protected void snapshotDisabledRegion(final HRegionInfo regionInfo)
       throws IOException {
-    // 2 copy the regionInfo files to the snapshot
-    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
-      workingDir, regionInfo);
-
-    // check for error for each region
-    monitor.rethrowException();
-
-    // 2 for each region, copy over its recovered.edits directory
-    Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
-    Path snapshotRegionDir = regionFs.getRegionDir();
-    new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call();
-    monitor.rethrowException();
-    status.setStatus("Completed copying recovered edits for offline snapshot of table: "
-        + snapshotTable);
-
-    // 2 reference all the files in the region
-    new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, fs, snapshotRegionDir).call();
+    snapshotManifest.addRegion(FSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
     monitor.rethrowException();
-    status.setStatus("Completed referencing HFiles for offline snapshot of table: " +
-        snapshotTable);
+    status.setStatus("Completed referencing HFiles for offline region " + regionInfo.toString() +
+        " of table: " + snapshotTable);
   }
 
   @Override

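For orientation, the hunks above reduce the handler's happy path to roughly the following. This is a sketch only: it assumes the SnapshotManifest calls visible in this diff (addTableDescriptor, addRegion, consolidate), elides the monitor.rethrowException() checks and error handling, and the helper method and its signature are illustrative rather than the actual TakeSnapshotHandler code.

    // Sketch of the manifest-based snapshot flow (hypothetical helper).
    void takeSnapshotSketch(FileSystem fs, Path workingDir, Path tableDir,
        SnapshotDescription snapshot, HTableDescriptor htd,
        SnapshotManifest snapshotManifest,
        List<HRegionInfo> regions) throws IOException {
      // 1. write the snapshot description into the working directory
      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
      // 2. record the table schema in the manifest (replaces TableInfoCopyTask)
      snapshotManifest.addTableDescriptor(htd);
      // 3. one manifest entry per region (offline/disabled-table path shown)
      for (HRegionInfo regionInfo : regions) {
        snapshotManifest.addRegion(tableDir, regionInfo);
      }
      // 4. flush the in-memory state into a single consolidated manifest
      snapshotManifest.consolidate();
      // 5. verify the working directory, then promote it atomically
      //    (completeSnapshot moves it from tmp to the .snapshot dir)
    }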
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java Wed May  7 21:28:12 2014
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.procedur
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Wed May  7 21:28:12 2014
@@ -127,6 +127,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -193,7 +194,7 @@ public class HRegion implements HeapSize
 
   public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY =
       "hbase.hregion.scan.loadColumnFamiliesOnDemand";
-      
+
   /**
    * This is the global default value for durability. All tables/mutations not
    * defining a durability or using USE_DEFAULT will default to this value.
@@ -472,7 +473,7 @@ public class HRegion implements HeapSize
   private RegionServerAccounting rsAccounting;
   private List<Pair<Long, Long>> recentFlushes = new ArrayList<Pair<Long,Long>>();
   private long flushCheckInterval;
-  // flushPerChanges is to prevent too many changes in memstore    
+  // flushPerChanges is to prevent too many changes in memstore
   private long flushPerChanges;
   private long blockingMemStoreSize;
   final long threadWakeFrequency;
@@ -572,7 +573,7 @@ public class HRegion implements HeapSize
       throw new IllegalArgumentException(MEMSTORE_FLUSH_PER_CHANGES + " can not exceed "
           + MAX_FLUSH_PER_CHANGES);
     }
-    
+
     this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration",
                     DEFAULT_ROWLOCK_WAIT_DURATION);
 
@@ -807,7 +808,7 @@ public class HRegion implements HeapSize
           for (Store store : this.stores.values()) {
             try {
               store.close();
-            } catch (IOException e) { 
+            } catch (IOException e) {
               LOG.warn(e.getMessage());
             }
           }
@@ -1144,7 +1145,7 @@ public class HRegion implements HeapSize
                 // so we do not lose data
                 throw new DroppedSnapshotException("Failed clearing memory after " +
                   actualFlushes + " attempts on region: " + Bytes.toStringBinary(getRegionName()));
-              } 
+              }
               LOG.info("Running extra flush, " + actualFlushes +
                 " (carrying snapshot?) " + this);
             }
@@ -2786,59 +2787,12 @@ public class HRegion implements HeapSize
    */
   public void addRegionToSnapshot(SnapshotDescription desc,
       ForeignExceptionSnare exnSnare) throws IOException {
-    // This should be "fast" since we don't rewrite store files but instead
-    // back up the store files by creating a reference
-    Path rootDir = FSUtils.getRootDir(this.rsServices.getConfiguration());
+    Path rootDir = FSUtils.getRootDir(conf);
     Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
 
-    // 1. dump region meta info into the snapshot directory
-    LOG.debug("Storing region-info for snapshot.");
-    HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-        this.fs.getFileSystem(), snapshotDir, getRegionInfo());
-
-    // 2. iterate through all the stores in the region
-    LOG.debug("Creating references for hfiles");
-
-    // This ensures that we have an atomic view of the directory as long as we have < ls limit
-    // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
-    // batches and may miss files being added/deleted. This could be more robust (iteratively
-    // checking to see if we have all the files until we are sure), but the limit is currently 1000
-    // files/batch, far more than the number of store files under a single column family.
-    for (Store store : stores.values()) {
-      // 2.1. build the snapshot reference directory for the store
-      Path dstStoreDir = snapshotRegionFs.getStoreDir(store.getFamily().getNameAsString());
-      List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding snapshot references for " + storeFiles  + " hfiles");
-      }
-
-      // 2.2. iterate through all the store's files and create "references".
-      int sz = storeFiles.size();
-      for (int i = 0; i < sz; i++) {
-        if (exnSnare != null) {
-          exnSnare.rethrowException();
-        }
-        StoreFile storeFile = storeFiles.get(i);
-        Path file = storeFile.getPath();
-
-        LOG.debug("Creating reference for file (" + (i+1) + "/" + sz + ") : " + file);
-        Path referenceFile = new Path(dstStoreDir, file.getName());
-        boolean success = true;
-        if (storeFile.isReference()) {
-          // write the Reference object to the snapshot
-          storeFile.getFileInfo().getReference().write(fs.getFileSystem(), referenceFile);
-        } else {
-          // create "reference" to this store file.  It is intentionally an empty file -- all
-          // necessary information is captured by its fs location and filename.  This allows us to
-          // only figure out what needs to be done via a single nn operation (instead of having to
-          // open and read the files as well).
-          success = fs.getFileSystem().createNewFile(referenceFile);
-        }
-        if (!success) {
-          throw new IOException("Failed to create reference file:" + referenceFile);
-        }
-      }
-    }
+    SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
+                                                        snapshotDir, desc, exnSnare);
+    manifest.addRegion(this);
   }
 
   /**
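The block removed above is worth keeping in mind when reading the new one-liner: in the old layout a plain store file was "referenced" by an empty file whose location and name alone identify the original hfile (a single namenode operation), while half-file references wrote out the Reference object itself. All of that bookkeeping now lives behind SnapshotManifest, so the region side collapses to roughly the sketch below. SnapshotManifest is added elsewhere in this commit, and the wrapper method here is illustrative.

    // Sketch: region-level snapshotting after this change.
    void snapshotRegionSketch(HRegion region, SnapshotDescription desc,
        Configuration conf, ForeignExceptionSnare exnSnare) throws IOException {
      Path rootDir = FSUtils.getRootDir(conf);
      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
      SnapshotManifest manifest = SnapshotManifest.create(
          conf, region.getFilesystem(), workingDir, desc, exnSnare);
      // records the region info plus one entry per store file, including
      // Reference data for half files, instead of creating marker files
      manifest.addRegion(region);
    }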
@@ -4005,14 +3959,14 @@ public class HRegion implements HeapSize
               isStopRow(nextKv.getRowArray(), nextKv.getRowOffset(), nextKv.getRowLength());
           // save that the row was empty before filters applied to it.
           final boolean isEmptyRow = results.isEmpty();
-          
+
           // We have the part of the row necessary for filtering (all of it, usually).
           // First filter with the filterRow(List).
           FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED;
           if (filter != null && filter.hasFilterRow()) {
             ret = filter.filterRowCellsWithRet(results);
           }
-          
+
           if ((isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE) || filterRow()) {
             results.clear();
             boolean moreRows = nextRow(currentRow, offset, length);
@@ -4080,7 +4034,7 @@ public class HRegion implements HeapSize
       return filter != null && (!filter.hasFilterRow())
           && filter.filterRow();
     }
-    
+
     private boolean filterRowKey(byte[] row, int offset, short length) throws IOException {
       return filter != null
           && filter.filterRowKey(row, offset, length);
@@ -5743,7 +5697,7 @@ public class HRegion implements HeapSize
    * modifies data. It has to be called just before a try.
    * #closeRegionOperation needs to be called in the try's finally block
    * Acquires a read lock and checks if the region is closing or closed.
-   * @throws IOException 
+   * @throws IOException
    */
   public void startRegionOperation() throws IOException {
     startRegionOperation(Operation.ANY);
@@ -5751,7 +5705,7 @@ public class HRegion implements HeapSize
 
   /**
    * @param op The operation is about to be taken on the region
-   * @throws IOException 
+   * @throws IOException
    */
   protected void startRegionOperation(Operation op) throws IOException {
     switch (op) {
@@ -5801,7 +5755,7 @@ public class HRegion implements HeapSize
   /**
    * Closes the lock. This needs to be called in the finally block corresponding
    * to the try block of #startRegionOperation
-   * @throws IOException 
+   * @throws IOException
    */
   public void closeRegionOperation() throws IOException {
     closeRegionOperation(Operation.ANY);

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java Wed May  7 21:28:12 2014
@@ -77,7 +77,7 @@ public class HRegionFileSystem {
   private final Configuration conf;
   private final Path tableDir;
   private final FileSystem fs;
-  
+
   /**
    * In order to handle NN connectivity hiccups, one needs to retry non-idempotent operations at
    * the client level.
@@ -149,7 +149,7 @@ public class HRegionFileSystem {
    * @param familyName Column Family Name
    * @return {@link Path} to the directory of the specified family
    */
-  Path getStoreDir(final String familyName) {
+  public Path getStoreDir(final String familyName) {
     return new Path(this.getRegionDir(), familyName);
   }
 
@@ -176,20 +176,31 @@ public class HRegionFileSystem {
     return getStoreFiles(Bytes.toString(familyName));
   }
 
+  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
+    return getStoreFiles(familyName, true);
+  }
+
   /**
    * Returns the store files available for the family.
    * This method performs filtering based on the valid store files.
    * @param familyName Column Family Name
    * @return a set of {@link StoreFileInfo} for the specified family.
    */
-  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
+  public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
+      throws IOException {
     Path familyDir = getStoreDir(familyName);
     FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
-    if (files == null) return null;
+    if (files == null) {
+      LOG.debug("No StoreFiles for: " + familyDir);
+      return null;
+    }
 
     ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
     for (FileStatus status: files) {
-      if (!StoreFileInfo.isValid(status)) continue;
+      if (validate && !StoreFileInfo.isValid(status)) {
+        LOG.warn("Invalid StoreFile: " + status.getPath());
+        continue;
+      }
 
       storeFiles.add(new StoreFileInfo(this.conf, this.fs, status));
     }
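A usage note on the new getStoreFiles overload: the one-argument form keeps the old behaviour, skipping (and now warning about) files that fail StoreFileInfo.isValid(), while validate == false returns every entry in the family directory unfiltered, which is presumably what snapshot and verification code needs when it must account for all files. The receiver and family name below are illustrative.

    // Sketch: the two behaviours of getStoreFiles after this change.
    Collection<StoreFileInfo> valid = regionFs.getStoreFiles("cf");        // filters invalid files
    Collection<StoreFileInfo> all   = regionFs.getStoreFiles("cf", false); // raw directory listing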
@@ -354,7 +365,7 @@ public class HRegionFileSystem {
     Path storeDir = getStoreDir(familyName);
     if(!fs.exists(storeDir) && !createDir(storeDir))
       throw new IOException("Failed creating " + storeDir);
-    
+
     String name = buildPath.getName();
     if (generateNewName) {
       name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
@@ -556,16 +567,16 @@ public class HRegionFileSystem {
    */
   Path splitStoreFile(final HRegionInfo hri, final String familyName,
       final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
-    
+
     // Check whether the split row lies in the range of the store file
     // If it is outside the range, return directly.
     if (top) {
       //check if larger than last key.
       KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
-      byte[] lastKey = f.createReader().getLastKey();      
+      byte[] lastKey = f.createReader().getLastKey();
       // If lastKey is null, the storefile is empty.
       if (lastKey == null) return null;
-      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), 
+      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
           splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
         return null;
       }
@@ -575,14 +586,14 @@ public class HRegionFileSystem {
       byte[] firstKey = f.createReader().getFirstKey();
       // If firstKey is null, the storefile is empty.
       if (firstKey == null) return null;
-      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), 
+      if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
           splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
         return null;
-      }      
+      }
     }
- 
+
     f.getReader().close(true);
-    
+
     Path splitDir = new Path(getSplitsDir(hri), familyName);
     // A reference to the bottom half of the hsf store file.
     Reference r =
@@ -681,7 +692,7 @@ public class HRegionFileSystem {
    * Commit a merged region, moving it from the merges temporary directory to
    * the proper location in the filesystem.
    * @param mergedRegionInfo merged region {@link HRegionInfo}
-   * @throws IOException 
+   * @throws IOException
    */
   void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
     Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());

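Although the splitStoreFile hunks above are whitespace-only, the logic they touch encodes an invariant worth spelling out: a daughter region only gets a Reference to a parent store file when the split row actually falls inside that file's key range; otherwise the whole file belongs to the other daughter and the method returns null. Schematically (a simplified sketch; the real comparisons operate on byte arrays with offsets, and compare() stands in for the reader's comparator):

    // Sketch: should this daughter get a reference to store file f?
    // splitKey is the first KeyValue on splitRow; firstKey/lastKey come
    // from the HFile reader, and a null key means the file is empty.
    if (top) {
      if (lastKey == null || compare(splitKey, lastKey) > 0) {
        return null; // split row is past the file: nothing for the top half
      }
    } else {
      if (firstKey == null || compare(splitKey, firstKey) < 0) {
        return null; // split row is before the file: nothing for the bottom half
      }
    }
    // otherwise, write a top/bottom Reference under the region's splits dir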
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Wed May  7 21:28:12 2014
@@ -222,7 +222,7 @@ public class StoreFile {
    * @return the StoreFileInfo object associated with this StoreFile.
    *         Never null; the info is set when the StoreFile is constructed.
    */
-  StoreFileInfo getFileInfo() {
+  public StoreFileInfo getFileInfo() {
     return this.fileInfo;
   }
 
@@ -614,7 +614,7 @@ public class StoreFile {
       if (comparator == null) {
         comparator = KeyValue.COMPARATOR;
       }
-      return new Writer(fs, filePath, 
+      return new Writer(fs, filePath,
           conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext);
     }
   }
@@ -694,7 +694,7 @@ public class StoreFile {
 
     /** Bytes per Checksum */
     protected int bytesPerChecksum;
-    
+
     TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
     /* isTimeRangeTrackerSet keeps track if the timeRange has already been set
      * When flushing a memstore, we set TimeRange and use this variable to
@@ -723,7 +723,7 @@ public class StoreFile {
         final Configuration conf,
         CacheConfig cacheConf,
         final KVComparator comparator, BloomType bloomType, long maxKeys,
-        InetSocketAddress[] favoredNodes, HFileContext fileContext) 
+        InetSocketAddress[] favoredNodes, HFileContext fileContext)
             throws IOException {
       writer = HFile.getWriterFactory(conf, cacheConf)
           .withPath(fs, path)

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java?rev=1593139&r1=1593138&r2=1593139&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java Wed May  7 21:28:12 2014
@@ -144,7 +144,7 @@ public class StoreFileInfo {
    * @return the Reference object associated with this StoreFileInfo.
    *         null if the StoreFile is not a reference.
    */
-  Reference getReference() {
+  public Reference getReference() {
     return this.reference;
   }
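Taken together, the visibility changes in this commit (HRegionFileSystem.getStoreDir, StoreFile.getFileInfo, and StoreFileInfo.getReference all move from package-private to public) let snapshot code living outside org.apache.hadoop.hbase.regionserver inspect store files directly. Below is a minimal sketch of the access pattern this enables, mirroring the logic removed from HRegion.addRegionToSnapshot earlier in this diff; the surrounding loop and collection are illustrative.

    // Sketch: distinguishing plain hfiles from half-file references.
    for (StoreFile storeFile : storeFiles) {
      if (storeFile.isReference()) {
        // a split half: the Reference itself carries the needed metadata
        Reference ref = storeFile.getFileInfo().getReference();
        // ... record ref in the snapshot manifest ...
      } else {
        // a plain hfile: its location and name are all that must be recorded
        Path hfile = storeFile.getPath();
      }
    }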