Posted to commits@hbase.apache.org by jm...@apache.org on 2013/02/14 14:35:59 UTC

svn commit: r1446173 [4/5] - in /hbase/branches/hbase-7290v2: ./ bin/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/ hbase-common/src/main/java/org/apache/hadoop/hbase/util/ hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/...

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Thu Feb 14 13:35:54 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -240,7 +241,7 @@ public class HBaseFsck extends Configure
    * When initially looking at HDFS, we attempt to find any orphaned data.
    */
   private List<HbckInfo> orphanHdfsDirs = Collections.synchronizedList(new ArrayList<HbckInfo>());
-  
+
   private Map<String, Set<String>> orphanTableDirs = new HashMap<String, Set<String>>();
 
   /**
@@ -399,7 +400,7 @@ public class HBaseFsck extends Configure
     if (!checkMetaOnly) {
       reportTablesInFlux();
     }
-    
+
     // get regions according to what is online on each RegionServer
     loadDeployedRegions();
 
@@ -797,19 +798,21 @@ public class HBaseFsck extends Configure
           if (!orphanTableDirs.containsKey(tableName)) {
             LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
             //should only report once for each table
-            errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE, 
+            errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE,
                 "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName);
             Set<String> columns = new HashSet<String>();
             orphanTableDirs.put(tableName, getColumnFamilyList(columns, hbi));
           }
         }
       }
-      modTInfo.addRegionInfo(hbi);
+      if (!hbi.isSkipChecks()) {
+        modTInfo.addRegionInfo(hbi);
+      }
     }
 
     return tablesInfo;
   }
-  
+
   /**
    * To get the column family list according to the column family dirs
    * @param columns
@@ -827,7 +830,7 @@ public class HBaseFsck extends Configure
     }
     return columns;
   }
-  
+
   /**
    * To fabricate a .tableinfo file with following contents<br>
    * 1. the correct tablename <br>
@@ -845,7 +848,7 @@ public class HBaseFsck extends Configure
     FSTableDescriptors.createTableDescriptor(htd, getConf(), true);
     return true;
   }
-  
+
   /**
    * To fix orphan table by creating a .tableinfo file under tableDir <br>
    * 1. if TableInfo is cached, to recover the .tableinfo accordingly <br>
@@ -1416,8 +1419,9 @@ public class HBaseFsck extends Configure
         FSUtils.checkAccess(ugi, file, FsAction.WRITE);
       } catch (AccessControlException ace) {
         LOG.warn("Got AccessControlException when preCheckPermission ", ace);
-        System.err.println("Current user " + ugi.getUserName() + " does not have write perms to " + file.getPath()
-            + ". Please rerun hbck as hdfs user " + file.getOwner());
+        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
+          + " does not have write perms to " + file.getPath()
+          + ". Please rerun hbck as hdfs user " + file.getOwner());
         throw new AccessControlException(ace);
       }
     }
@@ -1651,6 +1655,18 @@ public class HBaseFsck extends Configure
 
     // ========== Cases where the region is in META =============
     } else if (inMeta && inHdfs && !isDeployed && splitParent) {
+      // check whether this is an actual error, or just transient state where parent
+      // is not cleaned
+      if (hbi.metaEntry.splitA != null && hbi.metaEntry.splitB != null) {
+        // check that split daughters are there
+        HbckInfo infoA = this.regionInfoMap.get(hbi.metaEntry.splitA.getEncodedName());
+        HbckInfo infoB = this.regionInfoMap.get(hbi.metaEntry.splitB.getEncodedName());
+        if (infoA != null && infoB != null) {
+          // we already processed or will process daughters. Move on, nothing to see here.
+          hbi.setSkipChecks(true);
+          return;
+        }
+      }
       errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
           + descriptiveName + " is a split parent in META, in HDFS, "
           + "and not deployed on any region server. This could be transient.");
@@ -1781,7 +1797,9 @@ public class HBaseFsck extends Configure
         modTInfo.addServer(server);
       }
 
-      modTInfo.addRegionInfo(hbi);
+      if (!hbi.isSkipChecks()) {
+        modTInfo.addRegionInfo(hbi);
+      }
 
       tablesInfo.put(tableName, modTInfo);
     }
@@ -2319,19 +2337,19 @@ public class HBaseFsck extends Configure
 
       if (details) {
         // do full region split map dump
-        System.out.println("---- Table '"  +  this.tableName
+        errors.print("---- Table '"  +  this.tableName
             + "': region split map");
         dump(splits, regions);
-        System.out.println("---- Table '"  +  this.tableName
+        errors.print("---- Table '"  +  this.tableName
             + "': overlap groups");
         dumpOverlapProblems(overlapGroups);
-        System.out.println("There are " + overlapGroups.keySet().size()
+        errors.print("There are " + overlapGroups.keySet().size()
             + " overlap groups with " + overlapGroups.size()
             + " overlapping regions");
       }
       if (!sidelinedRegions.isEmpty()) {
         LOG.warn("Sidelined big overlapped regions, please bulk load them!");
-        System.out.println("---- Table '"  +  this.tableName
+        errors.print("---- Table '"  +  this.tableName
             + "': sidelined big overlapped regions");
         dumpSidelinedRegions(sidelinedRegions);
       }
@@ -2346,13 +2364,15 @@ public class HBaseFsck extends Configure
      */
     void dump(SortedSet<byte[]> splits, Multimap<byte[], HbckInfo> regions) {
       // we display this way because the last end key should be displayed as well.
+      StringBuilder sb = new StringBuilder();
       for (byte[] k : splits) {
-        System.out.print(Bytes.toStringBinary(k) + ":\t");
+        sb.setLength(0); // clear out existing buffer, if any.
+        sb.append(Bytes.toStringBinary(k) + ":\t");
         for (HbckInfo r : regions.get(k)) {
-          System.out.print("[ "+ r.toString() + ", "
+          sb.append("[ "+ r.toString() + ", "
               + Bytes.toStringBinary(r.getEndKey())+ "]\t");
         }
-        System.out.println();
+        errors.print(sb.toString());
       }
     }
   }
@@ -2361,12 +2381,12 @@ public class HBaseFsck extends Configure
     // we display this way because the last end key should be displayed as
     // well.
     for (byte[] k : regions.keySet()) {
-      System.out.print(Bytes.toStringBinary(k) + ":\n");
+      errors.print(Bytes.toStringBinary(k) + ":");
       for (HbckInfo r : regions.get(k)) {
-        System.out.print("[ " + r.toString() + ", "
-            + Bytes.toStringBinary(r.getEndKey()) + "]\n");
+        errors.print("[ " + r.toString() + ", "
+            + Bytes.toStringBinary(r.getEndKey()) + "]");
       }
-      System.out.println("----");
+      errors.print("----");
     }
   }
 
@@ -2374,9 +2394,9 @@ public class HBaseFsck extends Configure
     for (Map.Entry<Path, HbckInfo> entry: regions.entrySet()) {
       String tableName = Bytes.toStringBinary(entry.getValue().getTableName());
       Path path = entry.getKey();
-      System.out.println("This sidelined region dir should be bulk loaded: "
+      errors.print("This sidelined region dir should be bulk loaded: "
         + path.toString());
-      System.out.println("Bulk load command looks like: "
+      errors.print("Bulk load command looks like: "
         + "hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles "
         + path.toUri().getPath() + " "+ tableName);
     }
@@ -2543,7 +2563,8 @@ public class HBaseFsck extends Configure
               || hri.isMetaRegion() || hri.isRootRegion())) {
             return true;
           }
-          MetaEntry m = new MetaEntry(hri, sn, ts);
+          PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(result);
+          MetaEntry m = new MetaEntry(hri, sn, ts, daughters.getFirst(), daughters.getSecond());
           HbckInfo hbInfo = new HbckInfo(m);
           HbckInfo previous = regionInfoMap.put(hri.getEncodedName(), hbInfo);
           if (previous != null) {
@@ -2582,11 +2603,19 @@ public class HBaseFsck extends Configure
   static class MetaEntry extends HRegionInfo {
     ServerName regionServer;   // server hosting this region
     long modTime;          // timestamp of most recent modification metadata
+    HRegionInfo splitA, splitB; //split daughters
 
     public MetaEntry(HRegionInfo rinfo, ServerName regionServer, long modTime) {
+      this(rinfo, regionServer, modTime, null, null);
+    }
+
+    public MetaEntry(HRegionInfo rinfo, ServerName regionServer, long modTime,
+        HRegionInfo splitA, HRegionInfo splitB) {
       super(rinfo);
       this.regionServer = regionServer;
       this.modTime = modTime;
+      this.splitA = splitA;
+      this.splitB = splitB;
     }
 
     public boolean equals(Object o) {
@@ -2635,6 +2664,7 @@ public class HBaseFsck extends Configure
     private HdfsEntry hdfsEntry = null; // info in HDFS
     private List<OnlineEntry> deployedEntries = Lists.newArrayList(); // on Region Server
     private List<ServerName> deployedOn = Lists.newArrayList(); // info on RS's
+    private boolean skipChecks = false; // whether to skip further checks to this region info.
 
     HbckInfo(MetaEntry metaEntry) {
       this.metaEntry = metaEntry;
@@ -2752,6 +2782,14 @@ public class HBaseFsck extends Configure
       }
       return hdfsEntry.hri;
     }
+
+    public void setSkipChecks(boolean skipChecks) {
+      this.skipChecks = skipChecks;
+    }
+
+    public boolean isSkipChecks() {
+      return skipChecks;
+    }
   }
 
   final static Comparator<HbckInfo> cmp = new Comparator<HbckInfo>() {
@@ -2807,23 +2845,25 @@ public class HBaseFsck extends Configure
    * Prints summary of all tables found on the system.
    */
   private void printTableSummary(SortedMap<String, TableInfo> tablesInfo) {
-    System.out.println("Summary:");
+    StringBuilder sb = new StringBuilder();
+    errors.print("Summary:");
     for (TableInfo tInfo : tablesInfo.values()) {
       if (errors.tableHasErrors(tInfo)) {
-        System.out.println("Table " + tInfo.getName() + " is inconsistent.");
+        errors.print("Table " + tInfo.getName() + " is inconsistent.");
       } else {
-        System.out.println("  " + tInfo.getName() + " is okay.");
+        errors.print("  " + tInfo.getName() + " is okay.");
       }
-      System.out.println("    Number of regions: " + tInfo.getNumRegions());
-      System.out.print("    Deployed on: ");
+      errors.print("    Number of regions: " + tInfo.getNumRegions());
+      sb.setLength(0); // clear out existing buffer, if any.
+      sb.append("    Deployed on: ");
       for (ServerName server : tInfo.deployedOn) {
-        System.out.print(" " + server.toString());
+        sb.append(" " + server.toString());
       }
-      System.out.println();
+      errors.print(sb.toString());
     }
   }
 
-  private static ErrorReporter getErrorReporter(
+  static ErrorReporter getErrorReporter(
       final Configuration conf) throws ClassNotFoundException {
     Class<? extends ErrorReporter> reporter = conf.getClass("hbasefsck.errorreporter", PrintingErrorReporter.class, ErrorReporter.class);
     return (ErrorReporter)ReflectionUtils.newInstance(reporter, conf);
@@ -2836,7 +2876,8 @@ public class HBaseFsck extends Configure
       MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE,
       FIRST_REGION_STARTKEY_NOT_EMPTY, LAST_REGION_ENDKEY_NOT_EMPTY, DUPE_STARTKEYS,
       HOLE_IN_REGION_CHAIN, OVERLAP_IN_REGION_CHAIN, REGION_CYCLE, DEGENERATE_REGION,
-      ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE
+      ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE,
+      WRONG_USAGE
     }
     public void clear();
     public void report(String message);
@@ -2870,6 +2911,11 @@ public class HBaseFsck extends Configure
     }
 
     public synchronized void reportError(ERROR_CODE errorCode, String message) {
+      if (errorCode == ERROR_CODE.WRONG_USAGE) {
+        System.err.println(message);
+        return;
+      }
+
       errorList.add(errorCode);
       if (!summary) {
         System.out.println("ERROR: " + message);
@@ -3220,15 +3266,15 @@ public class HBaseFsck extends Configure
   boolean shouldFixHdfsHoles() {
     return fixHdfsHoles;
   }
-  
+
   public void setFixTableOrphans(boolean shouldFix) {
     fixTableOrphans = shouldFix;
   }
-   
+
   boolean shouldFixTableOrphans() {
     return fixTableOrphans;
   }
-  
+
   public void setFixHdfsOverlaps(boolean shouldFix) {
     fixHdfsOverlaps = shouldFix;
   }
@@ -3358,48 +3404,53 @@ public class HBaseFsck extends Configure
   }
 
   protected HBaseFsck printUsageAndExit() {
-    System.err.println("Usage: fsck [opts] {only tables}");
-    System.err.println(" where [opts] are:");
-    System.err.println("   -help Display help options (this)");
-    System.err.println("   -details Display full report of all regions.");
-    System.err.println("   -timelag <timeInSeconds>  Process only regions that " +
+    StringWriter sw = new StringWriter(2048);
+    PrintWriter out = new PrintWriter(sw);
+    out.println("Usage: fsck [opts] {only tables}");
+    out.println(" where [opts] are:");
+    out.println("   -help Display help options (this)");
+    out.println("   -details Display full report of all regions.");
+    out.println("   -timelag <timeInSeconds>  Process only regions that " +
                        " have not experienced any metadata updates in the last " +
                        " <timeInSeconds> seconds.");
-    System.err.println("   -sleepBeforeRerun <timeInSeconds> Sleep this many seconds" +
+    out.println("   -sleepBeforeRerun <timeInSeconds> Sleep this many seconds" +
         " before checking if the fix worked if run with -fix");
-    System.err.println("   -summary Print only summary of the tables and status.");
-    System.err.println("   -metaonly Only check the state of ROOT and META tables.");
-    System.err.println("   -sidelineDir <hdfs://> HDFS path to backup existing meta and root.");
-
-    System.err.println("");
-    System.err.println("  Metadata Repair options: (expert features, use with caution!)");
-    System.err.println("   -fix              Try to fix region assignments.  This is for backwards compatiblity");
-    System.err.println("   -fixAssignments   Try to fix region assignments.  Replaces the old -fix");
-    System.err.println("   -fixMeta          Try to fix meta problems.  This assumes HDFS region info is good.");
-    System.err.println("   -noHdfsChecking   Don't load/check region info from HDFS."
+    out.println("   -summary Print only summary of the tables and status.");
+    out.println("   -metaonly Only check the state of ROOT and META tables.");
+    out.println("   -sidelineDir <hdfs://> HDFS path to backup existing meta and root.");
+
+    out.println("");
+    out.println("  Metadata Repair options: (expert features, use with caution!)");
+    out.println("   -fix              Try to fix region assignments.  This is for backwards compatiblity");
+    out.println("   -fixAssignments   Try to fix region assignments.  Replaces the old -fix");
+    out.println("   -fixMeta          Try to fix meta problems.  This assumes HDFS region info is good.");
+    out.println("   -noHdfsChecking   Don't load/check region info from HDFS."
         + " Assumes META region info is good. Won't check/fix any HDFS issue, e.g. hole, orphan, or overlap");
-    System.err.println("   -fixHdfsHoles     Try to fix region holes in hdfs.");
-    System.err.println("   -fixHdfsOrphans   Try to fix region dirs with no .regioninfo file in hdfs");
-    System.err.println("   -fixTableOrphans  Try to fix table dirs with no .tableinfo file in hdfs (online mode only)");
-    System.err.println("   -fixHdfsOverlaps  Try to fix region overlaps in hdfs.");
-    System.err.println("   -fixVersionFile   Try to fix missing hbase.version file in hdfs.");
-    System.err.println("   -maxMerge <n>     When fixing region overlaps, allow at most <n> regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)");
-    System.err.println("   -sidelineBigOverlaps  When fixing region overlaps, allow to sideline big overlaps");
-    System.err.println("   -maxOverlapsToSideline <n>  When fixing region overlaps, allow at most <n> regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)");
-    System.err.println("   -fixSplitParents  Try to force offline split parents to be online.");
-    System.err.println("   -ignorePreCheckPermission  ignore filesystem permission pre-check");
-    System.err.println("   -fixReferenceFiles  Try to offline lingering reference store files");
-
-    System.err.println("");
-    System.err.println("  Datafile Repair options: (expert features, use with caution!)");
-    System.err.println("   -checkCorruptHFiles     Check all Hfiles by opening them to make sure they are valid");
-    System.err.println("   -sidelineCorruptHfiles  Quarantine corrupted HFiles.  implies -checkCorruptHfiles");
-
-    System.err.println("");
-    System.err.println("  Metadata Repair shortcuts");
-    System.err.println("   -repair           Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
+    out.println("   -fixHdfsHoles     Try to fix region holes in hdfs.");
+    out.println("   -fixHdfsOrphans   Try to fix region dirs with no .regioninfo file in hdfs");
+    out.println("   -fixTableOrphans  Try to fix table dirs with no .tableinfo file in hdfs (online mode only)");
+    out.println("   -fixHdfsOverlaps  Try to fix region overlaps in hdfs.");
+    out.println("   -fixVersionFile   Try to fix missing hbase.version file in hdfs.");
+    out.println("   -maxMerge <n>     When fixing region overlaps, allow at most <n> regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)");
+    out.println("   -sidelineBigOverlaps  When fixing region overlaps, allow to sideline big overlaps");
+    out.println("   -maxOverlapsToSideline <n>  When fixing region overlaps, allow at most <n> regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)");
+    out.println("   -fixSplitParents  Try to force offline split parents to be online.");
+    out.println("   -ignorePreCheckPermission  ignore filesystem permission pre-check");
+    out.println("   -fixReferenceFiles  Try to offline lingering reference store files");
+
+    out.println("");
+    out.println("  Datafile Repair options: (expert features, use with caution!)");
+    out.println("   -checkCorruptHFiles     Check all Hfiles by opening them to make sure they are valid");
+    out.println("   -sidelineCorruptHfiles  Quarantine corrupted HFiles.  implies -checkCorruptHfiles");
+
+    out.println("");
+    out.println("  Metadata Repair shortcuts");
+    out.println("   -repair           Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
         "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles");
-    System.err.println("   -repairHoles      Shortcut for -fixAssignments -fixMeta -fixHdfsHoles");
+    out.println("   -repairHoles      Shortcut for -fixAssignments -fixMeta -fixHdfsHoles");
+
+    out.flush();
+    errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString());
 
     setRetCode(-2);
     return this;
@@ -3445,39 +3496,40 @@ public class HBaseFsck extends Configure
         setDisplayFullReport();
       } else if (cmd.equals("-timelag")) {
         if (i == args.length - 1) {
-          System.err.println("HBaseFsck: -timelag needs a value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
           return printUsageAndExit();
         }
         try {
           long timelag = Long.parseLong(args[i+1]);
           setTimeLag(timelag);
         } catch (NumberFormatException e) {
-          System.err.println("-timelag needs a numeric value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "-timelag needs a numeric value.");
           return printUsageAndExit();
         }
         i++;
       } else if (cmd.equals("-sleepBeforeRerun")) {
         if (i == args.length - 1) {
-          System.err.println("HBaseFsck: -sleepBeforeRerun needs a value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "HBaseFsck: -sleepBeforeRerun needs a value.");
           return printUsageAndExit();
         }
         try {
           sleepBeforeRerun = Long.parseLong(args[i+1]);
         } catch (NumberFormatException e) {
-          System.err.println("-sleepBeforeRerun needs a numeric value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "-sleepBeforeRerun needs a numeric value.");
           return printUsageAndExit();
         }
         i++;
       } else if (cmd.equals("-sidelineDir")) {
         if (i == args.length - 1) {
-          System.err.println("HBaseFsck: -sidelineDir needs a value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sidelineDir needs a value.");
           return printUsageAndExit();
         }
         i++;
         setSidelineDir(args[i]);
       } else if (cmd.equals("-fix")) {
-        System.err.println("This option is deprecated, please use " +
-          "-fixAssignments instead.");
+        errors.reportError(ERROR_CODE.WRONG_USAGE,
+          "This option is deprecated, please use  -fixAssignments instead.");
         setFixAssignments(true);
       } else if (cmd.equals("-fixAssignments")) {
         setFixAssignments(true);
@@ -3532,27 +3584,31 @@ public class HBaseFsck extends Configure
         setCheckHdfs(true);
       } else if (cmd.equals("-maxOverlapsToSideline")) {
         if (i == args.length - 1) {
-          System.err.println("-maxOverlapsToSideline needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxOverlapsToSideline needs a numeric value argument.");
           return printUsageAndExit();
         }
         try {
           int maxOverlapsToSideline = Integer.parseInt(args[i+1]);
           setMaxOverlapsToSideline(maxOverlapsToSideline);
         } catch (NumberFormatException e) {
-          System.err.println("-maxOverlapsToSideline needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxOverlapsToSideline needs a numeric value argument.");
           return printUsageAndExit();
         }
         i++;
       } else if (cmd.equals("-maxMerge")) {
         if (i == args.length - 1) {
-          System.err.println("-maxMerge needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxMerge needs a numeric value argument.");
           return printUsageAndExit();
         }
         try {
           int maxMerge = Integer.parseInt(args[i+1]);
           setMaxMerge(maxMerge);
         } catch (NumberFormatException e) {
-          System.err.println("-maxMerge needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxMerge needs a numeric value argument.");
           return printUsageAndExit();
         }
         i++;
@@ -3561,11 +3617,11 @@ public class HBaseFsck extends Configure
       } else if (cmd.equals("-metaonly")) {
         setCheckMetaOnly();
       } else if (cmd.startsWith("-")) {
-        System.err.println("Unrecognized option:" + cmd);
+        errors.reportError(ERROR_CODE.WRONG_USAGE, "Unrecognized option:" + cmd);
         return printUsageAndExit();
       } else {
         includeTable(cmd);
-        System.out.println("Allow checking/fixes for table: " + cmd);
+        errors.print("Allow checking/fixes for table: " + cmd);
       }
     }
 
@@ -3597,9 +3653,7 @@ public class HBaseFsck extends Configure
         tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
       }
       hfcc.checkTables(tableDirs);
-      PrintWriter out = new PrintWriter(System.out);
-      hfcc.report(out);
-      out.flush();
+      hfcc.report(errors);
     }
 
     // check and fix table integrity, region consistency.
@@ -3634,13 +3688,22 @@ public class HBaseFsck extends Configure
    * ls -r for debugging purposes
    */
   void debugLsr(Path p) throws IOException {
-    debugLsr(getConf(), p);
+    debugLsr(getConf(), p, errors);
+  }
+
+  /**
+   * ls -r for debugging purposes
+   */
+  public static void debugLsr(Configuration conf,
+      Path p) throws IOException {
+    debugLsr(conf, p, new PrintingErrorReporter());
   }
 
   /**
    * ls -r for debugging purposes
    */
-  public static void debugLsr(Configuration conf, Path p) throws IOException {
+  public static void debugLsr(Configuration conf,
+      Path p, ErrorReporter errors) throws IOException {
     if (!LOG.isDebugEnabled() || p == null) {
       return;
     }
@@ -3650,7 +3713,7 @@ public class HBaseFsck extends Configure
       // nothing
       return;
     }
-    System.out.println(p);
+    errors.print(p.toString());
 
     if (fs.isFile(p)) {
       return;
@@ -3659,7 +3722,7 @@ public class HBaseFsck extends Configure
     if (fs.getFileStatus(p).isDir()) {
       FileStatus[] fss= fs.listStatus(p);
       for (FileStatus status : fss) {
-        debugLsr(conf, status.getPath());
+        debugLsr(conf, status.getPath(), errors);
       }
     }
   }

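(The common thread in the HBaseFsck changes above: console output now flows through the pluggable ErrorReporter, via errors.print(...) and the new WRONG_USAGE code, which is printed to System.err without being counted as a cluster error, so a reporter configured under the "hbasefsck.errorreporter" key sees everything hbck emits. A minimal sketch of that wiring; it assumes compilation into the org.apache.hadoop.hbase.util package, since getErrorReporter() is package-private after this change:)

    package org.apache.hadoop.hbase.util;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
    import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;

    public class ReporterSketch {
      public static void main(String[] args) throws ClassNotFoundException {
        Configuration conf = HBaseConfiguration.create();
        // getErrorReporter() defaults to PrintingErrorReporter; any other
        // ErrorReporter implementation can be set under the same config key.
        ErrorReporter errors = HBaseFsck.getErrorReporter(conf);
        errors.print("Summary:");  // same path printTableSummary() now uses
        // WRONG_USAGE is special-cased: written to System.err, not errorList.
        errors.reportError(ERROR_CODE.WRONG_USAGE,
            "Usage: fsck [opts] {only tables}");
      }
    }
 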
Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Thu Feb 14 13:35:54 2013
@@ -138,7 +138,7 @@ class HMerge {
   private static abstract class Merger {
     protected final Configuration conf;
     protected final FileSystem fs;
-    protected final Path tabledir;
+    protected final Path rootDir;
     protected final HTableDescriptor htd;
     protected final HLog hlog;
     private final long maxFilesize;
@@ -151,11 +151,9 @@ class HMerge {
       this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
           HConstants.DEFAULT_MAX_FILE_SIZE);
 
-      this.tabledir = new Path(
-          fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
-          Bytes.toString(tableName)
-      );
-      this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
+      this.rootDir = FSUtils.getRootDir(conf);
+      Path tabledir = HTableDescriptor.getTableDir(this.rootDir, tableName);
+      this.htd = FSTableDescriptors.getTableDescriptor(this.fs, tabledir);
       String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
 
       this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);
@@ -192,14 +190,10 @@ class HMerge {
       long nextSize = 0;
       for (int i = 0; i < info.length - 1; i++) {
         if (currentRegion == null) {
-          currentRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i],
-            this.htd, null);
-          currentRegion.initialize();
+          currentRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i], this.htd, hlog);
           currentSize = currentRegion.getLargestHStoreSize();
         }
-        nextRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1],
-          this.htd, null);
-        nextRegion.initialize();
+        nextRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i + 1], this.htd, hlog);
         nextSize = nextRegion.getLargestHStoreSize();
 
         if ((currentSize + nextSize) <= (maxFilesize / 2)) {
@@ -349,21 +343,15 @@ class HMerge {
         throws IOException {
       super(conf, fs, HConstants.META_TABLE_NAME);
 
-      Path rootTableDir = HTableDescriptor.getTableDir(
-          fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
-          HConstants.ROOT_TABLE_NAME);
+      Path rootDir = FSUtils.getRootDir(conf);
 
       // Scan root region to find all the meta regions
-
-      root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
-          HRegionInfo.ROOT_REGIONINFO, HTableDescriptor.ROOT_TABLEDESC, null);
-      root.initialize();
+      root = HRegion.openHRegion(conf, fs, rootDir, HRegionInfo.ROOT_REGIONINFO,
+          HTableDescriptor.ROOT_TABLEDESC, hlog);
 
       Scan scan = new Scan();
-      scan.addColumn(HConstants.CATALOG_FAMILY,
-          HConstants.REGIONINFO_QUALIFIER);
-      InternalScanner rootScanner =
-        root.getScanner(scan);
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+      InternalScanner rootScanner = root.getScanner(scan);
 
       try {
         List<KeyValue> results = new ArrayList<KeyValue>();

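(For context, the HMerge hunks above swap the two-step HRegion.newHRegion(...) plus initialize() pattern for the one-step HRegion.openHRegion(conf, fs, rootDir, info, htd, hlog), and derive directories from FSUtils.getRootDir(conf) instead of rebuilding them from hbase.rootdir by hand. A minimal sketch of the resulting open/close shape, with argument types as in the hunks; it needs a live HBase filesystem layout to do anything useful:)

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;

    class OpenRegionSketch {
      // Opens, queries, and closes a region in one flow; replaces the older
      // newHRegion(tabledir, ...) followed by a separate initialize() call.
      static long largestStoreSize(Configuration conf, FileSystem fs, Path rootDir,
          HRegionInfo info, HTableDescriptor htd, HLog hlog) throws IOException {
        HRegion region = HRegion.openHRegion(conf, fs, rootDir, info, htd, hlog);
        try {
          return region.getLargestHStoreSize();
        } finally {
          region.close();
        }
      }
    }
 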
Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java Thu Feb 14 13:35:54 2013
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util.hbc
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
@@ -46,7 +45,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.util.FSUtils.FamilyDirFilter;
 import org.apache.hadoop.hbase.util.FSUtils.HFileFilter;
 import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
-import org.apache.hadoop.hbase.util.HBaseFsck;
+import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 
 /**
  * This class marches through all of the region's hfiles and verifies that
@@ -341,22 +340,22 @@ public class HFileCorruptionChecker {
    * Print a human readable summary of hfile quarantining operations.
    * @param out
    */
-  public void report(PrintWriter out) {
-    out.println("Checked " + hfilesChecked.get() + " hfile for corruption");
-    out.println("  HFiles corrupted:                  " + corrupted.size());
+  public void report(ErrorReporter out) {
+    out.print("Checked " + hfilesChecked.get() + " hfile for corruption");
+    out.print("  HFiles corrupted:                  " + corrupted.size());
     if (inQuarantineMode) {
-      out.println("    HFiles successfully quarantined: " + quarantined.size());
+      out.print("    HFiles successfully quarantined: " + quarantined.size());
       for (Path sq : quarantined) {
-        out.println("      " + sq);
+        out.print("      " + sq);
       }
-      out.println("    HFiles failed quarantine:        " + failures.size());
+      out.print("    HFiles failed quarantine:        " + failures.size());
       for (Path fq : failures) {
-        out.println("      " + fq);
+        out.print("      " + fq);
       }
     }
-    out.println("    HFiles moved while checking:     " + missing.size());
+    out.print("    HFiles moved while checking:     " + missing.size());
     for (Path mq : missing) {
-      out.println("      " + mq);
+      out.print("      " + mq);
     }
 
     String initialState = (corrupted.size() == 0) ? "OK" : "CORRUPTED";
@@ -364,9 +363,9 @@ public class HFileCorruptionChecker {
         : "CORRUPTED";
 
     if (inQuarantineMode) {
-      out.println("Summary: " + initialState + " => " + fixedState);
+      out.print("Summary: " + initialState + " => " + fixedState);
     } else {
-      out.println("Summary: " + initialState);
+      out.print("Summary: " + initialState);
     }
   }
 }

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Thu Feb 14 13:35:54 2013
@@ -198,7 +198,13 @@ public class ZKUtil {
     if (System.getProperty("java.security.auth.login.config") != null)
       return;
 
+    // No keytab specified, no auth
     String keytabFilename = conf.get(keytabFileKey);
+    if (keytabFilename == null) {
+      LOG.warn("no keytab specified for: " + keytabFileKey);
+      return;
+    }
+
     String principalConfig = conf.get(userNameKey, System.getProperty("user.name"));
     String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname);
 
@@ -206,7 +212,7 @@ public class ZKUtil {
     // If keyTab is not specified use the Ticket Cache.
     // and set the zookeeper login context name.
     JaasConfiguration jaasConf = new JaasConfiguration(loginContextName,
-      keytabFilename, principalName);
+        principalName, keytabFilename);
     javax.security.auth.login.Configuration.setConfiguration(jaasConf);
     System.setProperty(loginContextProperty, loginContextName);
   }
@@ -913,7 +919,8 @@ public class ZKUtil {
       return true;
 
     // Master & RSs uses hbase.zookeeper.client.*
-    return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
+    return("kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication")) &&
+         conf.get("hbase.zookeeper.client.keytab.file") != null);
   }
 
   private static ArrayList<ACL> createACL(ZooKeeperWatcher zkw, String node) {
@@ -936,15 +943,6 @@ public class ZKUtil {
     }
   }
 
-  public static void waitForZKConnectionIfAuthenticating(ZooKeeperWatcher zkw)
-      throws InterruptedException {
-    if (isSecureZooKeeper(zkw.getConfiguration())) {
-       LOG.debug("Waiting for ZooKeeperWatcher to authenticate");
-       zkw.saslLatch.await();
-       LOG.debug("Done waiting.");
-    }
-  }
-
   //
   // Node creation
   //
@@ -971,7 +969,6 @@ public class ZKUtil {
       String znode, byte [] data)
   throws KeeperException {
     try {
-      waitForZKConnectionIfAuthenticating(zkw);
       zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode),
           CreateMode.EPHEMERAL);
     } catch (KeeperException.NodeExistsException nee) {
@@ -1011,7 +1008,6 @@ public class ZKUtil {
       ZooKeeperWatcher zkw, String znode, byte [] data)
   throws KeeperException {
     try {
-      waitForZKConnectionIfAuthenticating(zkw);
       zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode),
           CreateMode.PERSISTENT);
     } catch (KeeperException.NodeExistsException nee) {
@@ -1049,7 +1045,6 @@ public class ZKUtil {
       String znode, byte [] data)
   throws KeeperException, KeeperException.NodeExistsException {
     try {
-      waitForZKConnectionIfAuthenticating(zkw);
       zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode),
           CreateMode.PERSISTENT);
       Stat stat = zkw.getRecoverableZooKeeper().exists(znode, zkw);
@@ -1083,13 +1078,8 @@ public class ZKUtil {
   public static void asyncCreate(ZooKeeperWatcher zkw,
       String znode, byte [] data, final AsyncCallback.StringCallback cb,
       final Object ctx) {
-    try {
-      waitForZKConnectionIfAuthenticating(zkw);
-      zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data,
-          createACL(zkw, znode), CreateMode.PERSISTENT, cb, ctx);
-    } catch (InterruptedException e) {
-      zkw.interruptedException(e);
-    }
+    zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data,
+        createACL(zkw, znode), CreateMode.PERSISTENT, cb, ctx);
   }
 
   /**
@@ -1131,7 +1121,6 @@ public class ZKUtil {
     String znode = create.getPath();
     try {
       RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper();
-      waitForZKConnectionIfAuthenticating(zkw);
       if (zk.exists(znode, false) == null) {
         zk.create(znode, create.getData(), create.getAcl(), CreateMode.fromFlag(create.getFlags()));
       }
@@ -1186,7 +1175,6 @@ public class ZKUtil {
       if(znode == null) {
         return;
       }
-      waitForZKConnectionIfAuthenticating(zkw);
       zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode),
           CreateMode.PERSISTENT);
     } catch(KeeperException.NodeExistsException nee) {

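(Behavioral notes on the ZKUtil hunks above: the JAAS setup now warns and returns when no keytab is configured, the JaasConfiguration arguments are corrected to principal-then-keytab order, and isSecureZooKeeper() only treats the client as secure when kerberos authentication and a client keytab are both set. A minimal sketch of a configuration that passes the new check; the keytab path is only an example:)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.zookeeper.ZKUtil;

    public class SecureZkSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.security.authentication", "kerberos");
        // Without this property, the changed check above now returns false.
        conf.set("hbase.zookeeper.client.keytab.file", "/etc/hbase/hbase.keytab");
        System.out.println(ZKUtil.isSecureZooKeeper(conf));  // true
      }
    }
 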
Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java Thu Feb 14 13:35:54 2013
@@ -74,12 +74,6 @@ public abstract class ZooKeeperNodeTrack
    * or {@link #getData(boolean)} to get the data of the node if it is available.
    */
   public synchronized void start() {
-    try {
-      ZKUtil.waitForZKConnectionIfAuthenticating(watcher);
-    } catch (InterruptedException e) {
-      throw new IllegalStateException("ZookeeperNodeTracker on " + this.node 
-          + " interuppted while waiting for SASL Authentication", e);
-    }
     this.watcher.registerListener(this);
     try {
       if(ZKUtil.watchAndCheckExists(watcher, node)) {

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java Thu Feb 14 13:35:54 2013
@@ -342,34 +342,12 @@ public class ZooKeeperWatcher implements
         LOG.debug(this.identifier + " connected");
         break;
 
-      case SaslAuthenticated:
-        if (ZKUtil.isSecureZooKeeper(this.conf)) {
-          // We are authenticated, clients can proceed.
-          saslLatch.countDown();
-        }
-        break;
-
-      case AuthFailed:
-        if (ZKUtil.isSecureZooKeeper(this.conf)) {
-          // We could not be authenticated, but clients should proceed anyway.
-          // Only access to znodes that require SASL authentication will be
-          // denied. The client may never need to access them.
-          saslLatch.countDown();
-        }
-        break;
-
       // Abort the server if Disconnected or Expired
       case Disconnected:
         LOG.debug(prefix("Received Disconnected from ZooKeeper, ignoring"));
         break;
 
       case Expired:
-        if (ZKUtil.isSecureZooKeeper(this.conf)) {
-          // We consider Expired equivalent to AuthFailed for this
-          // connection. Authentication is never going to complete. The
-          // client should proceed to do cleanup.
-          saslLatch.countDown();
-        }
         String msg = prefix(this.identifier + " received expired from " +
           "ZooKeeper, aborting");
         // TODO: One thought is to add call to ZooKeeperListener so say,

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/resources/hbase-webapps/static/hbase_logo.png
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/resources/hbase-webapps/static/hbase_logo.png?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
Binary files - no diff available.

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/ruby/hbase/admin.rb?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/ruby/hbase/admin.rb (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/ruby/hbase/admin.rb Thu Feb 14 13:35:54 2013
@@ -523,7 +523,7 @@ module Hbase
             [ server.getHostname(), server.getPort(), server.getStartcode() ])
           puts("        %s" % [ status.getLoad(server).toString() ])
           for name, region in status.getLoad(server).getRegionsLoad()
-            puts("        %s" % [ region.getNameAsString() ])
+            puts("        %s" % [ region.getNameAsString().dump ])
             puts("            %s" % [ region.toString() ])
           end
         end

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java Thu Feb 14 13:35:54 2013
@@ -169,9 +169,7 @@ public abstract class HBaseTestCase exte
 
   protected HRegion openClosedRegion(final HRegion closedRegion)
   throws IOException {
-    HRegion r = new HRegion(closedRegion);
-    r.initialize();
-    return r;
+    return HRegion.openHRegion(closedRegion, null);
   }
 
   /**

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java Thu Feb 14 13:35:54 2013
@@ -112,9 +112,10 @@ import org.hbase.async.Scanner;
 public class PerformanceEvaluation extends Configured implements Tool {
   protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName());
 
-  private static final int ROW_LENGTH = 1000;
+  private static final int DEFAULT_ROW_PREFIX_LENGTH = 16;
+  private static final int VALUE_LENGTH = 1000;
   private static final int ONE_GB = 1024 * 1024 * 1000;
-  private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
+  private static final int ROWS_PER_GB = ONE_GB / VALUE_LENGTH;
 
   public static final byte[] COMPRESSION = Bytes.toBytes("NONE");
   public static final byte[] TABLE_NAME = Bytes.toBytes("TestTable");
@@ -127,6 +128,7 @@ public class PerformanceEvaluation exten
 
   private boolean miniCluster = false;
   private boolean nomapred = false;
+  private int rowPrefixLength = DEFAULT_ROW_PREFIX_LENGTH;
   private int N = 1;
   private int R = ROWS_PER_GB;
   private byte[] tableName = TABLE_NAME;
@@ -537,10 +539,11 @@ public class PerformanceEvaluation exten
     if (this.presplitRegions == 0)
       return new byte [0][];
 
-    byte[][] splits = new byte[this.presplitRegions][];
+    int numSplitPoints = presplitRegions - 1;
+    byte[][] splits = new byte[numSplitPoints][];
     int jump = this.R  / this.presplitRegions;
-    for (int i=0; i <this.presplitRegions; i++) {
-      int rowkey = jump * i;
+    for (int i=0; i < numSplitPoints; i++) {
+      int rowkey = jump * (1 + i);
       splits[i] = format(rowkey);
     }
     return splits;
@@ -931,9 +934,9 @@ public class PerformanceEvaluation exten
       if (row.size() != 1) {
         throw new IOException((row.isEmpty() ? "No" : "Multiple (" + row.size() + ')')
                               + " KeyValue found in row");
-      } else if (row.get(0).value().length != ROW_LENGTH) {
+      } else if (row.get(0).value().length != VALUE_LENGTH) {
         throw new IOException("Invalid value length (found: " + row.get(0).value().length
-                              + ", expected: " + ROW_LENGTH + ") in row \""
+                              + ", expected: " + VALUE_LENGTH + ") in row \""
                               + new String(row.get(0).key()) + '"');
       }
     }
@@ -1420,7 +1423,7 @@ public class PerformanceEvaluation exten
    * number (Does absolute in case number is negative).
    */
   public static byte [] format(final int number) {
-    byte [] b = new byte[10];
+    byte [] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10];
     int d = Math.abs(number);
     for (int i = b.length - 1; i >= 0; i--) {
       b[i] = (byte)((d % 10) + '0');
@@ -1436,10 +1439,10 @@ public class PerformanceEvaluation exten
    * @return Generated random value to insert into a table cell.
    */
   public static byte[] generateValue(final Random r) {
-    byte [] b = new byte [ROW_LENGTH];
+    byte [] b = new byte [VALUE_LENGTH];
     int i = 0;
 
-    for(i = 0; i < (ROW_LENGTH-8); i += 8) {
+    for(i = 0; i < (VALUE_LENGTH-8); i += 8) {
       b[i] = (byte) (65 + r.nextInt(26));
       b[i+1] = b[i];
       b[i+2] = b[i];
@@ -1451,7 +1454,7 @@ public class PerformanceEvaluation exten
     }
 
     byte a = (byte) (65 + r.nextInt(26));
-    for(; i < ROW_LENGTH; i++) {
+    for(; i < VALUE_LENGTH; i++) {
       b[i] = a;
     }
     return b;
@@ -1518,7 +1521,7 @@ public class PerformanceEvaluation exten
     Configuration conf = getConf();
     if (this.miniCluster) {
       dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-      zooKeeperCluster = new MiniZooKeeperCluster();
+      zooKeeperCluster = new MiniZooKeeperCluster(conf);
       int zooKeeperPort = zooKeeperCluster.startup(new File(System.getProperty("java.io.tmpdir")));
 
       // mangle the conf so that the fs parameter points to the minidfs we

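(The getSplits() fix above deserves a worked example: N presplit regions need only N-1 split points, but the old loop emitted N points starting at row 0, producing an extra, effectively empty leading region. With R=1000 and presplitRegions=4, a self-contained sketch of the corrected arithmetic:)

    public class SplitMathSketch {
      public static void main(String[] args) {
        int R = 1000;                              // total rows
        int presplitRegions = 4;                   // regions requested
        int numSplitPoints = presplitRegions - 1;  // 3 boundaries -> 4 regions
        int jump = R / presplitRegions;            // 250 rows per region
        for (int i = 0; i < numSplitPoints; i++) {
          // The old code looped presplitRegions times over jump * i, yielding
          // 0, 250, 500, 750: a boundary at row 0 and five regions, not four.
          System.out.println(jump * (1 + i));      // prints 250, 500, 750
        }
      }
    }
 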
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Thu Feb 14 13:35:54 2013
@@ -4807,11 +4807,11 @@ public class TestFromClientSide {
     assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
     // compact, net minus two blocks, two hits, no misses
     System.out.println("Compacting");
-    assertEquals(2, store.getNumberOfStoreFiles());
+    assertEquals(2, store.getStorefilesCount());
     store.triggerMajorCompaction();
     region.compactStores();
     waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
-    assertEquals(1, store.getNumberOfStoreFiles());
+    assertEquals(1, store.getStorefilesCount());
     expectedBlockCount -= 2; // evicted two blocks, cached none
     assertEquals(expectedBlockCount, cache.getBlockCount());
     expectedBlockHits += 2;
@@ -4832,12 +4832,12 @@ public class TestFromClientSide {
   throws InterruptedException {
     long start = System.currentTimeMillis();
     while (start + timeout > System.currentTimeMillis() &&
-        store.getNumberOfStoreFiles() != count) {
+        store.getStorefilesCount() != count) {
       Thread.sleep(100);
     }
     System.out.println("start=" + start + ", now=" +
-        System.currentTimeMillis() + ", cur=" + store.getNumberOfStoreFiles());
-    assertEquals(count, store.getNumberOfStoreFiles());
+        System.currentTimeMillis() + ", cur=" + store.getStorefilesCount());
+    assertEquals(count, store.getStorefilesCount());
   }
 
   @Test

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java Thu Feb 14 13:35:54 2013
@@ -401,8 +401,7 @@ public class TestCoprocessorInterface ex
   HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
       throws IOException {
     //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
-    HRegion r = new HRegion(closedRegion);
-    r.initialize();
+    HRegion r = HRegion.openHRegion(closedRegion, null);
 
     // this following piece is a hack. currently a coprocessorHost
     // is secretly loaded at OpenRegionHandler. we don't really

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java Thu Feb 14 13:35:54 2013
@@ -262,12 +262,10 @@ public class TestWALObserver {
         FileSystem newFS = FileSystem.get(newConf);
         // Make a new wal for new region open.
         HLog wal2 = createWAL(newConf);
-        Path tableDir =
-          HTableDescriptor.getTableDir(hbaseRootDir, hri.getTableName());
-        HRegion region = new HRegion(tableDir, wal2, FileSystem.get(newConf),
-          newConf, hri, htd, TEST_UTIL.getHBaseCluster().getRegionServer(0));
+        HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir,
+            hri, htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
+        long seqid2 = region.getOpenSeqNum();
 
-        long seqid2 = region.initialize();
         SampleRegionWALObserver cp2 =
           (SampleRegionWALObserver)region.getCoprocessorHost().findCoprocessor(
               SampleRegionWALObserver.class.getName());

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java Thu Feb 14 13:35:54 2013
@@ -140,7 +140,6 @@ public class TestFilterList {
     /* We should filter any row */
     rowkey = Bytes.toBytes("z");
     assertTrue(filterMPONE.filterRowKey(rowkey, 0, rowkey.length));
-    assertTrue(filterMPONE.filterRow());
     assertTrue(filterMPONE.filterAllRemaining());
 
   }
@@ -191,9 +190,6 @@ public class TestFilterList {
     // Should fail here; row should be filtered out.
     KeyValue kv = new KeyValue(rowkey, rowkey, rowkey, rowkey);
     assertTrue(Filter.ReturnCode.NEXT_ROW == filterMPALL.filterKeyValue(kv));
-
-    // Both filters in Set should be satisfied by now
-    assertTrue(filterMPALL.filterRow());
   }
 
   /**

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java Thu Feb 14 13:35:54 2013
@@ -105,15 +105,15 @@ public class TestEncodedSeekers {
 
     //write the data, but leave some in the memstore
     doPuts(region);
-    
+
     //verify correctness when memstore contains data
     doGets(region);
-    
+
     //verify correctness again after compacting
     region.compactStores();
     doGets(region);
 
-    
+
     Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
 
     // Ensure that compactions don't pollute the cache with unencoded blocks
@@ -124,8 +124,8 @@ public class TestEncodedSeekers {
     assertEquals(encoding, encodingInCache);
     assertTrue(encodingCounts.get(encodingInCache) > 0);
   }
-  
-  
+
+
   private void doPuts(HRegion region) throws IOException{
     LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
      for (int i = 0; i < NUM_ROWS; ++i) {
@@ -146,8 +146,8 @@ public class TestEncodedSeekers {
       }
     }
   }
-  
-  
+
+
   private void doGets(HRegion region) throws IOException{
     for (int i = 0; i < NUM_ROWS; ++i) {
       final byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java Thu Feb 14 13:35:54 2013
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.Pu
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -151,7 +152,8 @@ public class TestScannerSelectionUsingTT
 
     // Exercise both compaction codepaths.
     if (explicitCompaction) {
-      region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
+      HStore store = (HStore)region.getStore(FAMILY_BYTES);
+      store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
     } else {
       region.compactStores();
     }

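The hunk above tracks compactRecentForTesting moving off the Store interface: the test-only helper now lives on the HStore implementation, under a name that states its policy assumption, so call sites must cast. A minimal sketch of the updated idiom, assuming only the HRegion#getStore(byte[]) and HStore#compactRecentForTestingAssumingDefaultPolicy(int) signatures visible in this diff:

    import java.io.IOException;

    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.HStore;

    public class CompactRecentSketch {
      // Compacts the nFiles most recent store files of one column family.
      // The cast is needed because the helper is defined on the HStore
      // implementation rather than on the Store interface.
      static void compactRecent(HRegion region, byte[] family, int nFiles)
          throws IOException {
        HStore store = (HStore) region.getStore(family);
        store.compactRecentForTestingAssumingDefaultPolicy(nFiles);
      }
    }
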
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java Thu Feb 14 13:35:54 2013
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.TreeMap;
 import java.util.List;
 
@@ -195,7 +196,7 @@ public class TestLoadIncrementalHFiles {
     loader.doBulkLoad(dir, table);
 
     // Get the store files
-    List<StoreFile> files = util.getHBaseCluster().
+    Collection<StoreFile> files = util.getHBaseCluster().
         getRegions(TABLE).get(0).getStore(FAMILY).getStorefiles();
     for (StoreFile file: files) {
       // the sequenceId gets initialized during createReader

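The type change above follows Store#getStorefiles() now returning a Collection<StoreFile> rather than a List, so callers may no longer index into the result or rely on its order. A minimal sketch of a conforming call site, assuming the Collection-returning signature shown in these hunks (the getMaxSequenceId() accessor is used purely for illustration):

    import java.util.Collection;

    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class StoreFileScanSketch {
      // Iterates instead of indexing: a Collection gives no ordering or
      // random-access guarantees, unlike the List the old API returned.
      static long maxSequenceId(Store store) {
        long max = -1;
        Collection<StoreFile> files = store.getStorefiles();
        for (StoreFile file : files) {
          max = Math.max(max, file.getMaxSequenceId());
        }
        return max;
      }
    }
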
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java Thu Feb 14 13:35:54 2013
@@ -140,6 +140,7 @@ public class TestAssignmentManager {
     this.serverManager = Mockito.mock(ServerManager.class);
     Mockito.when(this.serverManager.isServerOnline(SERVERNAME_A)).thenReturn(true);
     Mockito.when(this.serverManager.isServerOnline(SERVERNAME_B)).thenReturn(true);
+    Mockito.when(this.serverManager.getDeadServers()).thenReturn(new DeadServer());
     final Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
     onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
     onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
@@ -186,6 +187,8 @@ public class TestAssignmentManager {
   @Test(timeout = 5000)
   public void testBalanceOnMasterFailoverScenarioWithOpenedNode()
   throws IOException, KeeperException, InterruptedException, ServiceException, DeserializationException {
+    Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, REGIONINFO, 0, null, true)).
+        thenReturn(true);
     AssignmentManagerWithExtrasForTesting am =
       setUpMockedAssignmentManager(this.server, this.serverManager);
     try {
@@ -232,6 +235,8 @@ public class TestAssignmentManager {
   @Test(timeout = 5000)
   public void testBalanceOnMasterFailoverScenarioWithClosedNode()
   throws IOException, KeeperException, InterruptedException, ServiceException, DeserializationException {
+    Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, REGIONINFO, 0, null, true)).
+        thenReturn(true);
     AssignmentManagerWithExtrasForTesting am =
       setUpMockedAssignmentManager(this.server, this.serverManager);
     try {
@@ -279,6 +284,8 @@ public class TestAssignmentManager {
   @Test(timeout = 5000)
   public void testBalanceOnMasterFailoverScenarioWithOfflineNode()
   throws IOException, KeeperException, InterruptedException, ServiceException, DeserializationException {
+    Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, REGIONINFO, 0, null, true)).
+        thenReturn(true);
     AssignmentManagerWithExtrasForTesting am =
       setUpMockedAssignmentManager(this.server, this.serverManager);
     try {
@@ -574,7 +581,7 @@ public class TestAssignmentManager {
     ClientProtocol implementation = Mockito.mock(ClientProtocol.class);
     // Get a meta row result that has region up on SERVERNAME_A
 
-    Result r = null;
+    Result r;
     if (splitRegion) {
       r = MetaMockingUtil.getMetaTableRowResultAsSplitRegion(REGIONINFO, SERVERNAME_A);
     } else {
@@ -912,7 +919,7 @@ public class TestAssignmentManager {
   /**
    * When a region is in transition, if the region server opening the region goes down,
    * the region assignment takes a long time normally (waiting for timeout monitor to trigger assign).
-   * This test is to make sure SSH times out the transition right away.
+   * This test makes sure the ServerShutdownHandler (SSH) reassigns it right away.
    */
   @Test
   public void testSSHTimesOutOpeningRegionTransition()
@@ -925,6 +932,7 @@ public class TestAssignmentManager {
     // adding region in pending open.
     RegionState state = new RegionState(REGIONINFO,
       State.OPENING, System.currentTimeMillis(), SERVERNAME_A);
+    am.getRegionStates().regionOnline(REGIONINFO, SERVERNAME_B);
     am.getRegionStates().regionsInTransition.put(REGIONINFO.getEncodedName(), state);
     // adding region plan
     am.regionPlans.put(REGIONINFO.getEncodedName(),
@@ -932,8 +940,9 @@ public class TestAssignmentManager {
     am.getZKTable().setEnabledTable(REGIONINFO.getTableNameAsString());
 
     try {
+      am.assignInvoked = false;
       processServerShutdownHandler(ct, am, false);
-      assertTrue("Transtion is timed out", state.getStamp() == 0);
+      assertTrue(am.assignInvoked);
     } finally {
       am.getRegionStates().regionsInTransition.remove(REGIONINFO.getEncodedName());
       am.regionPlans.remove(REGIONINFO.getEncodedName());
@@ -941,6 +950,30 @@ public class TestAssignmentManager {
   }
 
   /**
+   * Scenario:<ul>
+   *  <li> the master starts a close and creates a znode</li>
+   *  <li> it fails at exactly this point, before contacting the RS</li>
+   *  <li> while the second master is coming up, the targeted RS dies; its ZK session has not yet
+   *    timed out, so the new master does not know it is dead and gets an exception instead</li>
+   *  <li> the master must handle this cleanly and reassign the region</li>
+   *  </ul>
+   */
+  @Test
+  public void testClosingFailureDuringRecovery() throws Exception {
+
+    AssignmentManagerWithExtrasForTesting am =
+        setUpMockedAssignmentManager(this.server, this.serverManager);
+    ZKAssign.createNodeClosing(this.watcher, REGIONINFO, SERVERNAME_A);
+    am.getRegionStates().createRegionState(REGIONINFO);
+
+    assertFalse(am.getRegionStates().isRegionsInTransition());
+
+    am.processRegionInTransition(REGIONINFO.getEncodedName(), REGIONINFO);
+
+    assertTrue(am.getRegionStates().isRegionsInTransition());
+  }
+
+  /**
    * Creates a new ephemeral node in the SPLITTING state for the specified region.
    * Create it ephemeral in case regionserver dies mid-split.
    *
@@ -1084,7 +1117,7 @@ public class TestAssignmentManager {
     @Override
     public void assign(List<HRegionInfo> regions)
         throws IOException, InterruptedException {
-      assignInvoked = true;
+      assignInvoked = (regions != null && !regions.isEmpty());
     }
 
     /** reset the watcher */
@@ -1136,7 +1169,7 @@ public class TestAssignmentManager {
         } catch (InterruptedException e) {
           throw new RuntimeException(e);
         }
-      };
+      }
     };
     t.start();
     while (!t.isAlive()) Threads.sleep(1);

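The new Mockito stubs in this file are needed because the AssignmentManager now consults ServerManager#getDeadServers() and checks the boolean result of sendRegionClose(...); left unstubbed, the mock would return null and false respectively. A minimal sketch of the stubbing pattern, assuming the ServerManager signatures used in the hunks above:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.master.DeadServer;
    import org.apache.hadoop.hbase.master.ServerManager;
    import org.mockito.Mockito;

    public class ServerManagerStubsSketch {
      // Without the first stub, getDeadServers() returns null and the
      // AssignmentManager NPEs; without the second, the region close is
      // reported as failed and the failover tests time out.
      static void stubForFailover(ServerManager sm, ServerName server,
          HRegionInfo region) throws Exception {
        Mockito.when(sm.getDeadServers()).thenReturn(new DeadServer());
        Mockito.when(sm.sendRegionClose(server, region, 0, null, true))
            .thenReturn(true);
      }
    }
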
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java Thu Feb 14 13:35:54 2013
@@ -17,28 +17,39 @@
  */
 package org.apache.hadoop.hbase.master;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-@Category(SmallTests.class)
+import java.util.List;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@Category(MediumTests.class)
 public class TestDeadServer {
+  final ServerName hostname123 = new ServerName("127.0.0.1", 123, 3L);
+  final ServerName hostname123_2 = new ServerName("127.0.0.1", 123, 4L);
+  final ServerName hostname1234 = new ServerName("127.0.0.2", 1234, 4L);
+  final ServerName hostname12345 = new ServerName("127.0.0.2", 12345, 4L);
+
   @Test public void testIsDead() {
     DeadServer ds = new DeadServer();
-    final ServerName hostname123 = new ServerName("127.0.0.1", 123, 3L);
     ds.add(hostname123);
     assertTrue(ds.areDeadServersInProgress());
     ds.finish(hostname123);
     assertFalse(ds.areDeadServersInProgress());
-    final ServerName hostname1234 = new ServerName("127.0.0.2", 1234, 4L);
+
     ds.add(hostname1234);
     assertTrue(ds.areDeadServersInProgress());
     ds.finish(hostname1234);
     assertFalse(ds.areDeadServersInProgress());
-    final ServerName hostname12345 = new ServerName("127.0.0.2", 12345, 4L);
+
     ds.add(hostname12345);
     assertTrue(ds.areDeadServersInProgress());
     ds.finish(hostname12345);
@@ -52,11 +63,54 @@ public class TestDeadServer {
     ds.add(deadServer);
     assertTrue(ds.isDeadServer(deadServer));
     final ServerName deadServerHostComingAlive =
-      new ServerName("127.0.0.1", 9090, 112321L);
+      new ServerName("127.0.0.1", 9090, 223341L);
     assertTrue(ds.cleanPreviousInstance(deadServerHostComingAlive));
     assertFalse(ds.isDeadServer(deadServer));
     assertFalse(ds.cleanPreviousInstance(deadServerHostComingAlive));
   }
 
+
+  @Test
+  public void testSortExtract() {
+    ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
+    EnvironmentEdgeManager.injectEdge(mee);
+    mee.setValue(1);
+
+    DeadServer d = new DeadServer();
+
+
+    d.add(hostname123);
+    mee.incValue(1);
+    d.add(hostname1234);
+    mee.incValue(1);
+    d.add(hostname12345);
+
+    List<Pair<ServerName, Long>> copy = d.copyDeadServersSince(2L);
+    Assert.assertEquals(2, copy.size());
+
+    Assert.assertEquals(hostname1234, copy.get(0).getFirst());
+    Assert.assertEquals(new Long(2L), copy.get(0).getSecond());
+
+    Assert.assertEquals(hostname12345, copy.get(1).getFirst());
+    Assert.assertEquals(new Long(3L), copy.get(1).getSecond());
+
+    EnvironmentEdgeManager.reset();
+  }
+
+  @Test
+  public void testClean() {
+    DeadServer d = new DeadServer();
+    d.add(hostname123);
+
+    d.cleanPreviousInstance(hostname12345);
+    Assert.assertFalse(d.isEmpty());
+
+    d.cleanPreviousInstance(hostname1234);
+    Assert.assertFalse(d.isEmpty());
+
+    d.cleanPreviousInstance(hostname123_2);
+    Assert.assertTrue(d.isEmpty());
+  }
+
 }
 

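testSortExtract pins the clock with a ManualEnvironmentEdge so the death timestamps DeadServer records are deterministic; that is what lets copyDeadServersSince(2L) return exactly the two later entries. A minimal sketch of the injectable-clock pattern, assuming the EnvironmentEdgeManager API used above (that currentTimeMillis() reads the injected edge is an assumption of this sketch):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class InjectableClockSketch {
      public static void main(String[] args) {
        ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
        EnvironmentEdgeManager.injectEdge(clock);  // code under test now reads this clock
        try {
          clock.setValue(1);
          long t1 = EnvironmentEdgeManager.currentTimeMillis();  // 1
          clock.incValue(1);
          long t2 = EnvironmentEdgeManager.currentTimeMillis();  // 2
          System.out.println(t1 + " then " + t2);
        } finally {
          EnvironmentEdgeManager.reset();  // always restore the real clock
        }
      }
    }
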
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java Thu Feb 14 13:35:54 2013
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
-import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.ServerName;
@@ -154,11 +153,6 @@ public class TestMasterFailover {
 
     // Create config to use for this cluster
     Configuration conf = HBaseConfiguration.create();
-    // Need to drop the timeout much lower
-    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
-    conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 4000);
-    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 3);
-    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 3);
 
     // Start the cluster
     HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
@@ -278,6 +272,8 @@ public class TestMasterFailover {
      */
 
     // Region that should be assigned but is not and is in ZK as OFFLINE
+    // Cause: This can happen if the master crashed after creating the znode but before sending the
+    //   request to the region server.
     HRegionInfo region = enabledRegions.remove(0);
     regionsThatShouldBeOnline.add(region);
     ZKAssign.createNodeOffline(zkw, region, serverName);
@@ -285,6 +281,7 @@ public class TestMasterFailover {
     /*
      * ZK = CLOSING
      */
+    // Cause: Same as offline.
     regionsThatShouldBeOnline.add(closingRegion);
     ZKAssign.createNodeClosing(zkw, closingRegion, serverName);
 
@@ -293,6 +290,7 @@ public class TestMasterFailover {
      */
 
     // Region of enabled table closed but not ack
+    // Cause: Master was down while the region server updated the ZK status.
     region = enabledRegions.remove(0);
     regionsThatShouldBeOnline.add(region);
     int version = ZKAssign.createNodeClosing(zkw, region, serverName);
@@ -305,20 +303,11 @@ public class TestMasterFailover {
     ZKAssign.transitionNodeClosed(zkw, region, serverName, version);
 
     /*
-     * ZK = OPENING
-     */
-
-    // RS was opening a region of enabled table but never finishes
-    region = enabledRegions.remove(0);
-    regionsThatShouldBeOnline.add(region);
-    ZKAssign.createNodeOffline(zkw, region, serverName);
-    ZKAssign.transitionNodeOpening(zkw, region, serverName);
-
-    /*
      * ZK = OPENED
      */
 
     // Region of enabled table was opened on RS
+    // Cause: same as OFFLINE above.
     region = enabledRegions.remove(0);
     regionsThatShouldBeOnline.add(region);
     ZKAssign.createNodeOffline(zkw, region, serverName);
@@ -333,6 +322,7 @@ public class TestMasterFailover {
     }
 
     // Region of disable table was opened on RS
+    // Cause: Master failed while updating the status for this region.
     region = disabledRegions.remove(0);
     regionsThatShouldBeOffline.add(region);
     ZKAssign.createNodeOffline(zkw, region, serverName);
@@ -457,9 +447,7 @@ public class TestMasterFailover {
     // Create and start the cluster
     HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
     Configuration conf = TEST_UTIL.getConfiguration();
-    // Need to drop the timeout much lower
-    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
-    conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 4000);
+
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 2);
     TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
@@ -771,25 +759,6 @@ public class TestMasterFailover {
     assertTrue(cluster.waitForActiveAndReadyMaster());
     log("Master is ready");
 
-    // Let's add some weird states to master in-memory state
-
-    // After HBASE-3181, we need to have some ZK state if we're PENDING_OPEN
-    // b/c it is impossible for us to get into this state w/o a zk node
-    // this is not true of PENDING_CLOSE
-
-    // PENDING_OPEN and enabled
-    region = enabledRegions.remove(0);
-    regionsThatShouldBeOnline.add(region);
-    master.getAssignmentManager().getRegionStates().updateRegionState(
-      region, RegionState.State.PENDING_OPEN);
-    ZKAssign.createNodeOffline(zkw, region, master.getServerName());
-    // PENDING_OPEN and disabled
-    region = disabledRegions.remove(0);
-    regionsThatShouldBeOffline.add(region);
-    master.getAssignmentManager().getRegionStates().updateRegionState(
-      region, RegionState.State.PENDING_OPEN);
-    ZKAssign.createNodeOffline(zkw, region, master.getServerName());
-
     // Failover should be completed, now wait for no RIT
     log("Waiting for no more RIT");
     ZKAssign.blockUntilNoRIT(zkw);
@@ -863,8 +832,6 @@ public class TestMasterFailover {
     // Start the cluster
     HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
     Configuration conf = TEST_UTIL.getConfiguration();
-    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
-    conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
     conf.setInt("hbase.master.info.port", -1);
 
     TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
@@ -1016,84 +983,5 @@ public class TestMasterFailover {
     // Stop the cluster
     TEST_UTIL.shutdownMiniCluster();
   }
-
-  /**
-   * return the index of the active master in the cluster
-   * @throws MasterNotRunningException if no active master found
-   */
-  private int getActiveMasterIndex(MiniHBaseCluster cluster) throws MasterNotRunningException {
-    // get all the master threads
-    List<MasterThread> masterThreads = cluster.getMasterThreads();
-
-    for (int i = 0; i < masterThreads.size(); i++) {
-      if (masterThreads.get(i).getMaster().isActiveMaster()) {
-        return i;
-      }
-    }
-    throw new MasterNotRunningException();
-  }
-
-  /**
-   * Kill the master and wait for a new active master to show up
-   * @param cluster
-   * @return the new active master
-   * @throws InterruptedException
-   * @throws IOException
-   */
-  private HMaster killActiveAndWaitForNewActive(MiniHBaseCluster cluster)
-  throws InterruptedException, IOException {
-    int activeIndex = getActiveMasterIndex(cluster);
-    HMaster active = cluster.getMaster();
-    cluster.stopMaster(activeIndex);
-    cluster.waitOnMaster(activeIndex);
-    assertTrue(cluster.waitForActiveAndReadyMaster());
-    // double check this is actually a new master
-    HMaster newActive = cluster.getMaster();
-    assertFalse(active == newActive);
-    return newActive;
-  }
-
-  /**
-   * Test that if the master fails, the load balancer maintains its
-   * state (running or not) when the next master takes over
-   * @throws Exception
-   */
-  @Test (timeout=240000)
-  public void testMasterFailoverBalancerPersistence() throws Exception {
-    final int NUM_MASTERS = 3;
-    final int NUM_RS = 1;
-
-    // Start the cluster
-    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-
-    assertTrue(cluster.waitForActiveAndReadyMaster());
-    HMaster active = cluster.getMaster();
-    // check that the balancer is on by default for the active master
-    ClusterStatus clusterStatus = active.getClusterStatus();
-    assertTrue(clusterStatus.isBalancerOn());
-
-    active = killActiveAndWaitForNewActive(cluster);
-
-    // ensure the load balancer is still running on new master
-    clusterStatus = active.getClusterStatus();
-    assertTrue(clusterStatus.isBalancerOn());
-
-    // turn off the load balancer
-    active.balanceSwitch(false);
-
-    // once more, kill active master and wait for new active master to show up
-    active = killActiveAndWaitForNewActive(cluster);
-
-    // ensure the load balancer is not running on the new master
-    clusterStatus = active.getClusterStatus();
-    assertFalse(clusterStatus.isBalancerOn());
-
-    // Stop the cluster
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
 }
 

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java Thu Feb 14 13:35:54 2013
@@ -310,7 +310,7 @@ public class  TestRollingRestart {
       ServerName serverName) throws InterruptedException {
     ServerManager sm = activeMaster.getMaster().getServerManager();
     // First wait for it to be in dead list
-    while (!sm.getDeadServers().contains(serverName)) {
+    while (!sm.getDeadServers().isDeadServer(serverName)) {
       log("Waiting for [" + serverName + "] to be listed as dead in master");
       Thread.sleep(1);
     }

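The one-line change above reflects ServerManager#getDeadServers() now returning the DeadServer object itself rather than a collection of names queried with contains(...), so membership is asked with isDeadServer(...). A minimal sketch of the updated wait loop, assuming the two signatures shown in the hunk:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.master.ServerManager;

    public class WaitForDeadSketch {
      // Polls until the master has registered the given server as dead.
      static void waitUntilListedDead(ServerManager sm, ServerName sn)
          throws InterruptedException {
        while (!sm.getDeadServers().isDeadServer(sn)) {
          Thread.sleep(1);
        }
      }
    }
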
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1446173&r1=1446172&r2=1446173&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Thu Feb 14 13:35:54 2013
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -314,7 +315,7 @@ public class TestCompaction extends HBas
 
       // ensure that major compaction time is deterministic
       DefaultCompactionPolicy c = (DefaultCompactionPolicy)s.compactionPolicy;
-      List<StoreFile> storeFiles = s.getStorefiles();
+      Collection<StoreFile> storeFiles = s.getStorefiles();
       long mcTime = c.getNextMajorCompactTime(storeFiles);
       for (int i = 0; i < 10; ++i) {
         assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles));
@@ -439,7 +440,7 @@ public class TestCompaction extends HBas
     Store store2 = this.r.stores.get(fam2);
     int numFiles1 = store2.getStorefiles().size();
     assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
-    store2.compactRecentForTesting(compactionThreshold);   // = 3
+    ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
     int numFiles2 = store2.getStorefiles().size();
     // Check that we did compact
     assertTrue("Number of store files should go down", numFiles1 > numFiles2);
@@ -587,7 +588,7 @@ public class TestCompaction extends HBas
     }
     HStore store = (HStore) r.getStore(COLUMN_FAMILY);
 
-    List<StoreFile> storeFiles = store.getStorefiles();
+    Collection<StoreFile> storeFiles = store.getStorefiles();
     Compactor tool = store.compactionPolicy.getCompactor();
 
     List<Path> newFiles =