Posted to commits@hbase.apache.org by jx...@apache.org on 2013/02/08 00:04:12 UTC

svn commit: r1443769 - in /hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util: HBaseFsck.java hbck/HFileCorruptionChecker.java

Author: jxiang
Date: Thu Feb  7 23:04:12 2013
New Revision: 1443769

URL: http://svn.apache.org/r1443769
Log:
HBASE-7776 Use ErrorReporter/Log instead of System.out in hbck
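
The change is one mechanical substitution applied throughout hbck: direct
System.out/System.err calls are replaced with calls on the pluggable
ErrorReporter instance ("errors"), so all output flows through a single
replaceable channel. A minimal, self-contained sketch of that indirection
(Demo, Reporter, and SimpleReporter are illustrative names, not code from
this patch; the real interface is HBaseFsck.ErrorReporter and is richer
than shown here):

    public class Demo {
      enum ErrorCode { WRONG_USAGE, UNKNOWN }

      interface Reporter {
        void print(String message);                        // informational lines
        void reportError(ErrorCode code, String message);  // counted failures
      }

      static class SimpleReporter implements Reporter {
        @Override public void print(String message) {
          System.out.println(message);
        }
        @Override public void reportError(ErrorCode code, String message) {
          System.err.println("ERROR: " + message);
        }
      }

      public static void main(String[] args) {
        Reporter errors = new SimpleReporter();
        errors.print("Summary:");  // call sites no longer touch System.out directly
        errors.reportError(ErrorCode.WRONG_USAGE, "-timelag needs a value.");
      }
    }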

Modified:
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1443769&r1=1443768&r2=1443769&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Thu Feb  7 23:04:12 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -1417,8 +1418,9 @@ public class HBaseFsck extends Configure
         FSUtils.checkAccess(ugi, file, FsAction.WRITE);
       } catch (AccessControlException ace) {
         LOG.warn("Got AccessControlException when preCheckPermission ", ace);
-        System.err.println("Current user " + ugi.getUserName() + " does not have write perms to " + file.getPath()
-            + ". Please rerun hbck as hdfs user " + file.getOwner());
+        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
+          + " does not have write perms to " + file.getPath()
+          + ". Please rerun hbck as hdfs user " + file.getOwner());
         throw new AccessControlException(ace);
       }
     }
@@ -2327,19 +2329,19 @@ public class HBaseFsck extends Configure
 
       if (details) {
         // do full region split map dump
-        System.out.println("---- Table '"  +  this.tableName
+        errors.print("---- Table '"  +  this.tableName
             + "': region split map");
         dump(splits, regions);
-        System.out.println("---- Table '"  +  this.tableName
+        errors.print("---- Table '"  +  this.tableName
             + "': overlap groups");
         dumpOverlapProblems(overlapGroups);
-        System.out.println("There are " + overlapGroups.keySet().size()
+        errors.print("There are " + overlapGroups.keySet().size()
             + " overlap groups with " + overlapGroups.size()
             + " overlapping regions");
       }
       if (!sidelinedRegions.isEmpty()) {
         LOG.warn("Sidelined big overlapped regions, please bulk load them!");
-        System.out.println("---- Table '"  +  this.tableName
+        errors.print("---- Table '"  +  this.tableName
             + "': sidelined big overlapped regions");
         dumpSidelinedRegions(sidelinedRegions);
       }
@@ -2354,13 +2356,15 @@ public class HBaseFsck extends Configure
      */
     void dump(SortedSet<byte[]> splits, Multimap<byte[], HbckInfo> regions) {
       // we display this way because the last end key should be displayed as well.
+      StringBuilder sb = new StringBuilder();
       for (byte[] k : splits) {
-        System.out.print(Bytes.toStringBinary(k) + ":\t");
+        sb.setLength(0); // clear out existing buffer, if any.
+        sb.append(Bytes.toStringBinary(k) + ":\t");
         for (HbckInfo r : regions.get(k)) {
-          System.out.print("[ "+ r.toString() + ", "
+          sb.append("[ "+ r.toString() + ", "
               + Bytes.toStringBinary(r.getEndKey())+ "]\t");
         }
-        System.out.println();
+        errors.print(sb.toString());
       }
     }
   }
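
Note the pattern in the dump() hunk above: ErrorReporter.print() emits whole
lines, so the old partial-line System.out.print() fragments are accumulated
in a reusable StringBuilder and flushed once per split key. A hypothetical
stand-alone illustration (all names and sample data are invented here):

    import java.util.Arrays;
    import java.util.List;

    // Stand-in for the dump() rewrite: print fragments become one line per
    // key, with the StringBuilder reused across iterations via setLength(0).
    public class DumpSketch {
      public static void main(String[] args) {
        List<String> splits = Arrays.asList("row-a", "row-m");
        StringBuilder sb = new StringBuilder();
        for (String k : splits) {
          sb.setLength(0);                     // clear the buffer between keys
          sb.append(k).append(":\t");
          sb.append("[ regionA, endKeyA ]\t");
          System.out.println(sb);              // stand-in for errors.print(sb.toString())
        }
      }
    }
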
@@ -2369,12 +2373,12 @@ public class HBaseFsck extends Configure
     // we display this way because the last end key should be displayed as
     // well.
     for (byte[] k : regions.keySet()) {
-      System.out.print(Bytes.toStringBinary(k) + ":\n");
+      errors.print(Bytes.toStringBinary(k) + ":");
       for (HbckInfo r : regions.get(k)) {
-        System.out.print("[ " + r.toString() + ", "
-            + Bytes.toStringBinary(r.getEndKey()) + "]\n");
+        errors.print("[ " + r.toString() + ", "
+            + Bytes.toStringBinary(r.getEndKey()) + "]");
       }
-      System.out.println("----");
+      errors.print("----");
     }
   }
 
@@ -2382,9 +2386,9 @@ public class HBaseFsck extends Configure
     for (Map.Entry<Path, HbckInfo> entry: regions.entrySet()) {
       String tableName = Bytes.toStringBinary(entry.getValue().getTableName());
       Path path = entry.getKey();
-      System.out.println("This sidelined region dir should be bulk loaded: "
+      errors.print("This sidelined region dir should be bulk loaded: "
         + path.toString());
-      System.out.println("Bulk load command looks like: "
+      errors.print("Bulk load command looks like: "
         + "hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles "
         + path.toUri().getPath() + " "+ tableName);
     }
@@ -2815,23 +2819,25 @@ public class HBaseFsck extends Configure
    * Prints summary of all tables found on the system.
    */
   private void printTableSummary(SortedMap<String, TableInfo> tablesInfo) {
-    System.out.println("Summary:");
+    StringBuilder sb = new StringBuilder();
+    errors.print("Summary:");
     for (TableInfo tInfo : tablesInfo.values()) {
       if (errors.tableHasErrors(tInfo)) {
-        System.out.println("Table " + tInfo.getName() + " is inconsistent.");
+        errors.print("Table " + tInfo.getName() + " is inconsistent.");
       } else {
-        System.out.println("  " + tInfo.getName() + " is okay.");
+        errors.print("  " + tInfo.getName() + " is okay.");
       }
-      System.out.println("    Number of regions: " + tInfo.getNumRegions());
-      System.out.print("    Deployed on: ");
+      errors.print("    Number of regions: " + tInfo.getNumRegions());
+      sb.setLength(0); // clear out existing buffer, if any.
+      sb.append("    Deployed on: ");
       for (ServerName server : tInfo.deployedOn) {
-        System.out.print(" " + server.toString());
+        sb.append(" " + server.toString());
       }
-      System.out.println();
+      errors.print(sb.toString());
     }
   }
 
-  private static ErrorReporter getErrorReporter(
+  static ErrorReporter getErrorReporter(
       final Configuration conf) throws ClassNotFoundException {
     Class<? extends ErrorReporter> reporter = conf.getClass("hbasefsck.errorreporter", PrintingErrorReporter.class, ErrorReporter.class);
     return (ErrorReporter)ReflectionUtils.newInstance(reporter, conf);
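
The hunk above also widens getErrorReporter() from private to
package-private, presumably so other code in the same package (such as
tests) can obtain the configured reporter. The reporter class itself is
resolved from the Configuration, defaulting to PrintingErrorReporter. A
sketch of that lookup under simplified types (the "hbasefsck.errorreporter"
key and the default come from the code above; ReporterLookup and MyReporter
are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;

    public class ReporterLookup {
      public interface ErrorReporter { void print(String message); }

      public static class PrintingErrorReporter implements ErrorReporter {
        // ReflectionUtils.newInstance requires a no-arg constructor.
        public PrintingErrorReporter() {}
        @Override public void print(String message) { System.out.println(message); }
      }

      static ErrorReporter getErrorReporter(Configuration conf) {
        // Resolve the reporter class from configuration, falling back to
        // the printing implementation when nothing is set.
        Class<? extends ErrorReporter> reporter = conf.getClass(
            "hbasefsck.errorreporter", PrintingErrorReporter.class,
            ErrorReporter.class);
        return ReflectionUtils.newInstance(reporter, conf);
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // e.g. conf.setClass("hbasefsck.errorreporter", MyReporter.class,
        //                    ErrorReporter.class);
        getErrorReporter(conf).print("hello from the configured reporter");
      }
    }
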
@@ -2844,7 +2850,8 @@ public class HBaseFsck extends Configure
       MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE,
       FIRST_REGION_STARTKEY_NOT_EMPTY, LAST_REGION_ENDKEY_NOT_EMPTY, DUPE_STARTKEYS,
       HOLE_IN_REGION_CHAIN, OVERLAP_IN_REGION_CHAIN, REGION_CYCLE, DEGENERATE_REGION,
-      ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE
+      ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE,
+      WRONG_USAGE
     }
     public void clear();
     public void report(String message);
@@ -2878,6 +2885,11 @@ public class HBaseFsck extends Configure
     }
 
     public synchronized void reportError(ERROR_CODE errorCode, String message) {
+      if (errorCode == ERROR_CODE.WRONG_USAGE) {
+        System.err.println(message);
+        return;
+      }
+
       errorList.add(errorCode);
       if (!summary) {
         System.out.println("ERROR: " + message);
@@ -3365,48 +3377,53 @@ public class HBaseFsck extends Configure
   }
 
   protected HBaseFsck printUsageAndExit() {
-    System.err.println("Usage: fsck [opts] {only tables}");
-    System.err.println(" where [opts] are:");
-    System.err.println("   -help Display help options (this)");
-    System.err.println("   -details Display full report of all regions.");
-    System.err.println("   -timelag <timeInSeconds>  Process only regions that " +
+    StringWriter sw = new StringWriter(2048);
+    PrintWriter out = new PrintWriter(sw);
+    out.println("Usage: fsck [opts] {only tables}");
+    out.println(" where [opts] are:");
+    out.println("   -help Display help options (this)");
+    out.println("   -details Display full report of all regions.");
+    out.println("   -timelag <timeInSeconds>  Process only regions that " +
                        " have not experienced any metadata updates in the last " +
                        " <timeInSeconds> seconds.");
-    System.err.println("   -sleepBeforeRerun <timeInSeconds> Sleep this many seconds" +
+    out.println("   -sleepBeforeRerun <timeInSeconds> Sleep this many seconds" +
         " before checking if the fix worked if run with -fix");
-    System.err.println("   -summary Print only summary of the tables and status.");
-    System.err.println("   -metaonly Only check the state of ROOT and META tables.");
-    System.err.println("   -sidelineDir <hdfs://> HDFS path to backup existing meta and root.");
-
-    System.err.println("");
-    System.err.println("  Metadata Repair options: (expert features, use with caution!)");
-    System.err.println("   -fix              Try to fix region assignments.  This is for backwards compatiblity");
-    System.err.println("   -fixAssignments   Try to fix region assignments.  Replaces the old -fix");
-    System.err.println("   -fixMeta          Try to fix meta problems.  This assumes HDFS region info is good.");
-    System.err.println("   -noHdfsChecking   Don't load/check region info from HDFS."
+    out.println("   -summary Print only summary of the tables and status.");
+    out.println("   -metaonly Only check the state of ROOT and META tables.");
+    out.println("   -sidelineDir <hdfs://> HDFS path to backup existing meta and root.");
+
+    out.println("");
+    out.println("  Metadata Repair options: (expert features, use with caution!)");
+    out.println("   -fix              Try to fix region assignments.  This is for backwards compatiblity");
+    out.println("   -fixAssignments   Try to fix region assignments.  Replaces the old -fix");
+    out.println("   -fixMeta          Try to fix meta problems.  This assumes HDFS region info is good.");
+    out.println("   -noHdfsChecking   Don't load/check region info from HDFS."
         + " Assumes META region info is good. Won't check/fix any HDFS issue, e.g. hole, orphan, or overlap");
-    System.err.println("   -fixHdfsHoles     Try to fix region holes in hdfs.");
-    System.err.println("   -fixHdfsOrphans   Try to fix region dirs with no .regioninfo file in hdfs");
-    System.err.println("   -fixTableOrphans  Try to fix table dirs with no .tableinfo file in hdfs (online mode only)");
-    System.err.println("   -fixHdfsOverlaps  Try to fix region overlaps in hdfs.");
-    System.err.println("   -fixVersionFile   Try to fix missing hbase.version file in hdfs.");
-    System.err.println("   -maxMerge <n>     When fixing region overlaps, allow at most <n> regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)");
-    System.err.println("   -sidelineBigOverlaps  When fixing region overlaps, allow to sideline big overlaps");
-    System.err.println("   -maxOverlapsToSideline <n>  When fixing region overlaps, allow at most <n> regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)");
-    System.err.println("   -fixSplitParents  Try to force offline split parents to be online.");
-    System.err.println("   -ignorePreCheckPermission  ignore filesystem permission pre-check");
-    System.err.println("   -fixReferenceFiles  Try to offline lingering reference store files");
-
-    System.err.println("");
-    System.err.println("  Datafile Repair options: (expert features, use with caution!)");
-    System.err.println("   -checkCorruptHFiles     Check all Hfiles by opening them to make sure they are valid");
-    System.err.println("   -sidelineCorruptHfiles  Quarantine corrupted HFiles.  implies -checkCorruptHfiles");
-
-    System.err.println("");
-    System.err.println("  Metadata Repair shortcuts");
-    System.err.println("   -repair           Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
+    out.println("   -fixHdfsHoles     Try to fix region holes in hdfs.");
+    out.println("   -fixHdfsOrphans   Try to fix region dirs with no .regioninfo file in hdfs");
+    out.println("   -fixTableOrphans  Try to fix table dirs with no .tableinfo file in hdfs (online mode only)");
+    out.println("   -fixHdfsOverlaps  Try to fix region overlaps in hdfs.");
+    out.println("   -fixVersionFile   Try to fix missing hbase.version file in hdfs.");
+    out.println("   -maxMerge <n>     When fixing region overlaps, allow at most <n> regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)");
+    out.println("   -sidelineBigOverlaps  When fixing region overlaps, allow to sideline big overlaps");
+    out.println("   -maxOverlapsToSideline <n>  When fixing region overlaps, allow at most <n> regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)");
+    out.println("   -fixSplitParents  Try to force offline split parents to be online.");
+    out.println("   -ignorePreCheckPermission  ignore filesystem permission pre-check");
+    out.println("   -fixReferenceFiles  Try to offline lingering reference store files");
+
+    out.println("");
+    out.println("  Datafile Repair options: (expert features, use with caution!)");
+    out.println("   -checkCorruptHFiles     Check all Hfiles by opening them to make sure they are valid");
+    out.println("   -sidelineCorruptHfiles  Quarantine corrupted HFiles.  implies -checkCorruptHfiles");
+
+    out.println("");
+    out.println("  Metadata Repair shortcuts");
+    out.println("   -repair           Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
         "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles");
-    System.err.println("   -repairHoles      Shortcut for -fixAssignments -fixMeta -fixHdfsHoles");
+    out.println("   -repairHoles      Shortcut for -fixAssignments -fixMeta -fixHdfsHoles");
+
+    out.flush();
+    errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString());
 
     setRetCode(-2);
     return this;
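
printUsageAndExit() now assembles the whole help text in memory with a
StringWriter-backed PrintWriter and emits it as a single WRONG_USAGE report,
instead of dozens of System.err.println calls. A trimmed, runnable
illustration of that buffering (UsageSketch is a hypothetical name; the
last line stands in for errors.reportError):

    import java.io.PrintWriter;
    import java.io.StringWriter;

    public class UsageSketch {
      public static void main(String[] args) {
        StringWriter sw = new StringWriter(2048);
        PrintWriter out = new PrintWriter(sw);
        out.println("Usage: fsck [opts] {only tables}");
        out.println(" where [opts] are:");
        out.println("   -help Display help options (this)");
        out.flush();  // push everything into the StringWriter's buffer
        // Stand-in for errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString()):
        System.err.println(sw.toString());
      }
    }
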
@@ -3452,39 +3469,40 @@ public class HBaseFsck extends Configure
         setDisplayFullReport();
       } else if (cmd.equals("-timelag")) {
         if (i == args.length - 1) {
-          System.err.println("HBaseFsck: -timelag needs a value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
           return printUsageAndExit();
         }
         try {
           long timelag = Long.parseLong(args[i+1]);
           setTimeLag(timelag);
         } catch (NumberFormatException e) {
-          System.err.println("-timelag needs a numeric value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "-timelag needs a numeric value.");
           return printUsageAndExit();
         }
         i++;
       } else if (cmd.equals("-sleepBeforeRerun")) {
         if (i == args.length - 1) {
-          System.err.println("HBaseFsck: -sleepBeforeRerun needs a value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "HBaseFsck: -sleepBeforeRerun needs a value.");
           return printUsageAndExit();
         }
         try {
           sleepBeforeRerun = Long.parseLong(args[i+1]);
         } catch (NumberFormatException e) {
-          System.err.println("-sleepBeforeRerun needs a numeric value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "-sleepBeforeRerun needs a numeric value.");
           return printUsageAndExit();
         }
         i++;
       } else if (cmd.equals("-sidelineDir")) {
         if (i == args.length - 1) {
-          System.err.println("HBaseFsck: -sidelineDir needs a value.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sidelineDir needs a value.");
           return printUsageAndExit();
         }
         i++;
         setSidelineDir(args[i]);
       } else if (cmd.equals("-fix")) {
-        System.err.println("This option is deprecated, please use " +
-          "-fixAssignments instead.");
+        errors.reportError(ERROR_CODE.WRONG_USAGE,
+          "This option is deprecated, please use  -fixAssignments instead.");
         setFixAssignments(true);
       } else if (cmd.equals("-fixAssignments")) {
         setFixAssignments(true);
@@ -3539,27 +3557,31 @@ public class HBaseFsck extends Configure
         setCheckHdfs(true);
       } else if (cmd.equals("-maxOverlapsToSideline")) {
         if (i == args.length - 1) {
-          System.err.println("-maxOverlapsToSideline needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxOverlapsToSideline needs a numeric value argument.");
           return printUsageAndExit();
         }
         try {
           int maxOverlapsToSideline = Integer.parseInt(args[i+1]);
           setMaxOverlapsToSideline(maxOverlapsToSideline);
         } catch (NumberFormatException e) {
-          System.err.println("-maxOverlapsToSideline needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxOverlapsToSideline needs a numeric value argument.");
           return printUsageAndExit();
         }
         i++;
       } else if (cmd.equals("-maxMerge")) {
         if (i == args.length - 1) {
-          System.err.println("-maxMerge needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxMerge needs a numeric value argument.");
           return printUsageAndExit();
         }
         try {
           int maxMerge = Integer.parseInt(args[i+1]);
           setMaxMerge(maxMerge);
         } catch (NumberFormatException e) {
-          System.err.println("-maxMerge needs a numeric value argument.");
+          errors.reportError(ERROR_CODE.WRONG_USAGE,
+            "-maxMerge needs a numeric value argument.");
           return printUsageAndExit();
         }
         i++;
@@ -3568,11 +3590,11 @@ public class HBaseFsck extends Configure
       } else if (cmd.equals("-metaonly")) {
         setCheckMetaOnly();
       } else if (cmd.startsWith("-")) {
-        System.err.println("Unrecognized option:" + cmd);
+        errors.reportError(ERROR_CODE.WRONG_USAGE, "Unrecognized option:" + cmd);
         return printUsageAndExit();
       } else {
         includeTable(cmd);
-        System.out.println("Allow checking/fixes for table: " + cmd);
+        errors.print("Allow checking/fixes for table: " + cmd);
       }
     }
 
@@ -3604,9 +3626,7 @@ public class HBaseFsck extends Configure
         tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
       }
       hfcc.checkTables(tableDirs);
-      PrintWriter out = new PrintWriter(System.out);
-      hfcc.report(out);
-      out.flush();
+      hfcc.report(errors);
     }
 
     // check and fix table integrity, region consistency.
@@ -3641,13 +3661,22 @@ public class HBaseFsck extends Configure
    * ls -r for debugging purposes
    */
   void debugLsr(Path p) throws IOException {
-    debugLsr(getConf(), p);
+    debugLsr(getConf(), p, errors);
+  }
+
+  /**
+   * ls -r for debugging purposes
+   */
+  public static void debugLsr(Configuration conf,
+      Path p) throws IOException {
+    debugLsr(conf, p, new PrintingErrorReporter());
   }
 
   /**
    * ls -r for debugging purposes
    */
-  public static void debugLsr(Configuration conf, Path p) throws IOException {
+  public static void debugLsr(Configuration conf,
+      Path p, ErrorReporter errors) throws IOException {
     if (!LOG.isDebugEnabled() || p == null) {
       return;
     }
@@ -3657,7 +3686,7 @@ public class HBaseFsck extends Configure
       // nothing
       return;
     }
-    System.out.println(p);
+    errors.print(p.toString());
 
     if (fs.isFile(p)) {
       return;
@@ -3666,7 +3695,7 @@ public class HBaseFsck extends Configure
     if (fs.getFileStatus(p).isDir()) {
       FileStatus[] fss= fs.listStatus(p);
       for (FileStatus status : fss) {
-        debugLsr(conf, status.getPath());
+        debugLsr(conf, status.getPath(), errors);
       }
     }
   }
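
The debugLsr() hunks keep the old two-argument signature as a thin
compatibility overload that supplies a default PrintingErrorReporter, while
the real work moves to the three-argument variant that threads the reporter
through the recursion. The same pattern in miniature (types simplified;
OverloadSketch is an invented name):

    public class OverloadSketch {
      interface Reporter { void print(String message); }

      static class PrintingReporter implements Reporter {
        @Override public void print(String message) { System.out.println(message); }
      }

      // Old entry point, preserved so existing callers keep compiling.
      public static void debugLsr(String path) {
        debugLsr(path, new PrintingReporter());
      }

      // New entry point; all output flows through the supplied reporter.
      public static void debugLsr(String path, Reporter errors) {
        errors.print(path);
      }

      public static void main(String[] args) {
        debugLsr("/hbase/some/dir");  // unchanged call site still works
      }
    }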

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java?rev=1443769&r1=1443768&r2=1443769&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java Thu Feb  7 23:04:12 2013
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util.hbc
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
@@ -45,6 +44,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.util.FSUtils.FamilyDirFilter;
 import org.apache.hadoop.hbase.util.FSUtils.HFileFilter;
 import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
+import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 
 /**
  * This class marches through all of the region's hfiles and verifies that
@@ -338,22 +338,22 @@ public class HFileCorruptionChecker {
    * Print a human readable summary of hfile quarantining operations.
    * @param out
    */
-  public void report(PrintWriter out) {
-    out.println("Checked " + hfilesChecked.get() + " hfile for corruption");
-    out.println("  HFiles corrupted:                  " + corrupted.size());
+  public void report(ErrorReporter out) {
+    out.print("Checked " + hfilesChecked.get() + " hfile for corruption");
+    out.print("  HFiles corrupted:                  " + corrupted.size());
     if (inQuarantineMode) {
-      out.println("    HFiles successfully quarantined: " + quarantined.size());
+      out.print("    HFiles successfully quarantined: " + quarantined.size());
       for (Path sq : quarantined) {
-        out.println("      " + sq);
+        out.print("      " + sq);
       }
-      out.println("    HFiles failed quarantine:        " + failures.size());
+      out.print("    HFiles failed quarantine:        " + failures.size());
       for (Path fq : failures) {
-        out.println("      " + fq);
+        out.print("      " + fq);
       }
     }
-    out.println("    HFiles moved while checking:     " + missing.size());
+    out.print("    HFiles moved while checking:     " + missing.size());
     for (Path mq : missing) {
-      out.println("      " + mq);
+      out.print("      " + mq);
     }
 
     String initialState = (corrupted.size() == 0) ? "OK" : "CORRUPTED";
@@ -361,9 +361,9 @@ public class HFileCorruptionChecker {
         : "CORRUPTED";
 
     if (inQuarantineMode) {
-      out.println("Summary: " + initialState + " => " + fixedState);
+      out.print("Summary: " + initialState + " => " + fixedState);
     } else {
-      out.println("Summary: " + initialState);
+      out.print("Summary: " + initialState);
     }
   }
 }
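
With this signature change, HFileCorruptionChecker.report() takes the
reporter abstraction instead of a PrintWriter, which is what lets the caller
in HBaseFsck simply pass its own errors instance (the hfcc.report(errors)
hunk earlier in this commit). A compact illustration (Checker, Reporter, and
the sample counts are invented stand-ins):

    public class ReportSketch {
      interface Reporter { void print(String message); }

      static class Checker {
        int hfilesChecked = 42;  // hypothetical sample counts
        int corrupted = 0;

        void report(Reporter out) {  // was: report(PrintWriter out)
          out.print("Checked " + hfilesChecked + " hfile for corruption");
          out.print("  HFiles corrupted:                  " + corrupted);
          out.print("Summary: " + (corrupted == 0 ? "OK" : "CORRUPTED"));
        }
      }

      public static void main(String[] args) {
        // Reporter has a single abstract method, so a method reference works.
        new Checker().report(System.out::println);
      }
    }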