Posted to commits@hbase.apache.org by st...@apache.org on 2008/05/28 20:27:37 UTC

svn commit: r661039 - in /hadoop/hbase/branches/0.1: CHANGES.txt src/java/org/apache/hadoop/hbase/HRegion.java src/java/org/apache/hadoop/hbase/HStore.java

Author: stack
Date: Wed May 28 11:27:37 2008
New Revision: 661039

URL: http://svn.apache.org/viewvc?rev=661039&view=rev
Log:
HBASE-646 EOFException opening HStoreFile info file (spin on HBASE-645 and 550)
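
In short, the change hardens HStore's storefile loading against zero-length files left behind by HDFS data loss: empty info files are skipped, and mapfiles whose 'data' component is empty are deleted, instead of surfacing later as an EOFException. A minimal sketch of the zero-length check, assuming only the Hadoop FileSystem calls that also appear in the diff below (the class and method names here are illustrative, not part of the patch):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch only: the core guard the patch applies to info files and
    // mapfile data files before trying to open them.
    public class ZeroLengthFileCheck {
      /** @return true if the path exists but holds no bytes (possible data loss). */
      public static boolean isZeroLength(final FileSystem fs, final Path p)
      throws IOException {
        return fs.exists(p) && fs.getFileStatus(p).getLen() <= 0;
      }
    }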

Modified:
    hadoop/hbase/branches/0.1/CHANGES.txt
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
    hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java

Modified: hadoop/hbase/branches/0.1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/CHANGES.txt?rev=661039&r1=661038&r2=661039&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.1/CHANGES.txt Wed May 28 11:27:37 2008
@@ -6,6 +6,7 @@
    HBASE-641   Improve master split logging
    HBASE-642   Splitting log in a hostile environment -- bad hdfs -- we drop
                write-ahead-log edits
+   HBASE-646   EOFException opening HStoreFile info file (spin on HBASE-645 and 550)
 
 Release 0.1.2 - 05/13/2008
 

Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java?rev=661039&r1=661038&r2=661039&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HRegion.java Wed May 28 11:27:37 2008
@@ -1814,7 +1814,7 @@
             filter != null ?
               (RowFilterInterface)WritableUtils.clone(filter, conf) : filter);
         }
-      } catch(IOException e) {
+      } catch (IOException e) {
         for (int i = 0; i < this.scanners.length; i++) {
           if(scanners[i] != null) {
             closeScanner(i);
@@ -1940,12 +1940,17 @@
         try {
           scanners[i].close();
         } catch (IOException e) {
-          LOG.warn("Failed closeing scanner " + i, e);
+          LOG.warn("Failed closing scanner " + i, e);
         }
       } finally {
         scanners[i] = null;
-        resultSets[i] = null;
-        keys[i] = null;
+        // These data members can be null if an exception was thrown in the constructor
+        if (resultSets != null) {
+          resultSets[i] = null;
+        }
+        if (keys != null) {
+          keys[i] = null;
+        }
       }
     }
 

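The HRegion.java change above hardens scanner cleanup: resultSets and keys can still be null if the scanner constructor threw before initializing them, so the close path now checks for that before clearing the per-scanner slots. A hedged sketch of the same pattern (the names below are illustrative, not HRegion's actual members):

    import java.io.Closeable;
    import java.io.IOException;

    // Sketch only: cleanup that tolerates arrays left null by a
    // partially-completed constructor.
    public class ScannerCleanup {
      static void closeSlot(final Closeable[] scanners, final Object[] resultSets,
          final Object[] keys, final int i) {
        try {
          if (scanners[i] != null) {
            scanners[i].close();
          }
        } catch (IOException e) {
          // A failed close should not abort cleanup of the remaining slots.
        } finally {
          scanners[i] = null;
          if (resultSets != null) {
            resultSets[i] = null;
          }
          if (keys != null) {
            keys[i] = null;
          }
        }
      }
    }
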
Modified: hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java?rev=661039&r1=661038&r2=661039&view=diff
==============================================================================
--- hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ hadoop/hbase/branches/0.1/src/java/org/apache/hadoop/hbase/HStore.java Wed May 28 11:27:37 2008
@@ -992,6 +992,14 @@
     ArrayList<HStoreFile> results = new ArrayList<HStoreFile>(infofiles.length);
     ArrayList<Path> mapfiles = new ArrayList<Path>(infofiles.length);
     for (Path p: infofiles) {
+      // Check for empty info file.  Should never be the case but can happen
+      // after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
+      if (this.fs.getFileStatus(p).getLen() <= 0) {
+        LOG.warn("Skipping " + p + " because it's empty.  DATA LOSS?  Can " +
+          "this scenario be repaired?  HBASE-646");
+        continue;
+      }
+
       Matcher m = REF_NAME_PARSER.matcher(p.getName());
       /*
        *  *  *  *  *  N O T E  *  *  *  *  *
@@ -1015,13 +1023,20 @@
       Path mapfile = curfile.getMapFilePath();
       if (!fs.exists(mapfile)) {
         fs.delete(curfile.getInfoFilePath());
-        LOG.warn("Mapfile " + mapfile.toString() + " does not exist. " +
-          "Cleaned up info file.  Continuing...Probable DATA LOSS!!!");
+        LOG.warn("Mapfile " + mapfile.toString() + " does not exist. Cleaned " +
+          "up info file.  Continuing...Probable DATA LOSS!!!");
         continue;
       }
-      
+      if (isEmptyDataFile(mapfile)) {
+        curfile.delete();
+        // We can have an empty data file if there was data loss in hdfs.
+        LOG.warn("Mapfile " + mapfile.toString() + " has empty data. " +
+          "Deleting.  Continuing...Probable DATA LOSS!!!  See HBASE-646.");
+        continue;
+      }
+
       // TODO: Confirm referent exists.
-      
+
       // Found map and sympathetic info file.  Add this hstorefile to result.
       results.add(curfile);
       if (LOG.isDebugEnabled()) {
@@ -1044,7 +1059,22 @@
     }
     return results;
   }
-  
+
+  /* 
+   * @param mapfile
+   * @return True if the passed mapfile has a zero-length data component (it's
+   * broken).
+   * @throws IOException
+   */
+  private boolean isEmptyDataFile(final Path mapfile)
+  throws IOException {
+    // Mapfiles are made of 'data' and 'index' files.  Confirm 'data' is
+    // non-empty if it exists (may not have been written to yet).
+    Path dataFile = new Path(mapfile, "data");
+    return this.fs.exists(dataFile) &&
+      this.fs.getFileStatus(dataFile).getLen() == 0;
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // Bloom filters
   //////////////////////////////////////////////////////////////////////////////
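
For context, a Hadoop MapFile is a directory containing a 'data' file and an 'index' file, which is why isEmptyDataFile() inspects the 'data' child of the mapfile path. A hedged usage sketch of the same check outside HStore (the configuration and the path argument are assumptions, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch only: report whether a mapfile directory has an empty 'data' file.
    public class MapFileDataCheck {
      public static void main(final String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path mapfile = new Path(args[0]);          // path to a store's mapfile dir
        Path dataFile = new Path(mapfile, "data"); // MapFile data component
        boolean empty = fs.exists(dataFile) &&
          fs.getFileStatus(dataFile).getLen() == 0;
        System.out.println(mapfile + (empty ? ": empty data file" : ": data present"));
      }
    }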