Posted to commits@hbase.apache.org by ap...@apache.org on 2009/07/11 00:58:35 UTC

svn commit: r793134 - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/ipc/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regionserver/ src/java/org/apache...

Author: apurtell
Date: Fri Jul 10 22:58:34 2009
New Revision: 793134

URL: http://svn.apache.org/viewvc?rev=793134&view=rev
Log:
HBASE-1641, HBASE-1627, HBASE-1644, HBASE-1639, HBASE-698, HBASE-1643, HBASE-1603

Modified:
    hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MinorCompactingStoreScanner.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt Fri Jul 10 22:58:34 2009
@@ -251,6 +251,12 @@
                row as when instantiated (Erik Holstad via Stack)
    HBASE-1629  HRS unable to contact master
    HBASE-1633  Can't delete in TRUNK shell; makes it hard doing admin repairs
+   HBASE-1641  Stargate build.xml causes error in Eclipse
+   HBASE-1627  TableInputFormatBase#nextKeyValue catches the wrong exception
+               (Doğacan Güney via Stack)
+   HBASE-1644  Result.row is cached in getRow; this breaks MapReduce
+               (Doğacan Güney via Stack)
+   HBASE-1639  clean checkout with empty hbase-site.xml, zk won't start
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage
@@ -458,6 +464,10 @@
    HBASE-1637  Delete client class methods should return itself like Put, Get,
                Scan (Jon Gray via Nitay)
    HBASE-1640  Allow passing arguments to jruby script run when run by bin/hbase shell
+   HBASE-698   HLog recovery is not performed after master failure
+   HBASE-1643  ScanDeleteTracker takes a comparator but it is unused
+   HBASE-1603  MR failed "RetriesExhaustedException: Trying to contact region server
+               Some server for region TestTable..." -- debugging
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java Fri Jul 10 22:58:34 2009
@@ -606,8 +606,7 @@
         
           // instantiate the location
           location = new HRegionLocation(regionInfo,
-              new HServerAddress(serverAddress));
-          LOG.debug(location);
+            new HServerAddress(serverAddress));
           cacheLocation(tableName, location);
           return location;
         } catch (TableNotFoundException e) {
@@ -744,7 +743,7 @@
             if (rl != null && LOG.isDebugEnabled()) {
               LOG.debug("Removed " + rl.getRegionInfo().getRegionNameAsString() +
                 " for tableName=" + Bytes.toString(tableName) + " from cache " +
-                "because of " + Bytes.toString(row));
+                "because of " + Bytes.toStringBinary(row));
             }
           }
         }
@@ -780,7 +779,9 @@
       byte [] startKey = location.getRegionInfo().getStartKey();
       SoftValueSortedMap<byte [], HRegionLocation> tableLocations =
         getTableLocations(tableName);
-      tableLocations.put(startKey, location);
+      if (tableLocations.put(startKey, location) == null) {
+        LOG.debug("Cached location " + location);
+      }
     }
     
     public HRegionInterface getHRegionConnection(
@@ -973,7 +974,7 @@
       List<Throwable> exceptions = new ArrayList<Throwable>();
       HRegionLocation location = null;
       int tries = 0;
-      while (tries < numRetries) {
+      for (; tries < numRetries;) {
         try {
           location = getRegionLocation(tableName, rowKey, reloadFlag);
         } catch (Throwable t) {
@@ -1003,7 +1004,6 @@
         return;
       }
       boolean retryOnlyOne = false;
-      int tries = 0;
       if (list.size() > 1) {
         Collections.sort(list);
       }
@@ -1014,37 +1014,43 @@
       byte [] currentRegion = location.getRegionInfo().getRegionName();
       byte [] region = currentRegion;
       boolean isLastRow = false;
-      for (int i = 0; i < list.size() && tries < numRetries; i++) {
+      Put [] putarray = new Put[0];
+      for (int i = 0, tries = 0; i < list.size() && tries < this.numRetries; i++) {
         Put put = list.get(i);
         currentPuts.add(put);
+        // If the next Put goes to a new region, then currentPuts is
+        // submitted and cleared during this iteration.
         isLastRow = (i + 1) == list.size();
         if (!isLastRow) {
           location = getRegionLocationForRowWithRetries(tableName,
-            list.get(i+1).getRow(), false);
+            list.get(i + 1).getRow(), false);
           region = location.getRegionInfo().getRegionName();
         }
         if (!Bytes.equals(currentRegion, region) || isLastRow || retryOnlyOne) {
-          final Put [] puts = currentPuts.toArray(new Put[0]);
+          final Put [] puts = currentPuts.toArray(putarray);
           int index = getRegionServerWithRetries(new ServerCallable<Integer>(
               this, tableName, put.getRow()) {
             public Integer call() throws IOException {
-              int i = server.put(location.getRegionInfo()
-                  .getRegionName(), puts);
-              return i;
+              return server.put(location.getRegionInfo().getRegionName(), puts);
             }
           });
+          // index is -1 if all Puts were processed successfully; otherwise
+          // it is the index of the last Put successfully processed.
           if (index != -1) {
             if (tries == numRetries - 1) {
-              throw new RetriesExhaustedException("Some server",
-                  currentRegion, put.getRow(), 
-                  tries, new ArrayList<Throwable>());
+              throw new RetriesExhaustedException("Some server, retryOnlyOne=" +
+                retryOnlyOne + ", index=" + index + ", islastrow=" + isLastRow +
+                ", tries=" + tries + ", numtries=" + numRetries + ", i=" + i +
+                ", listsize=" + list.size() + ", location=" + location +
+                ", region=" + Bytes.toStringBinary(region),
+                currentRegion, put.getRow(), tries, new ArrayList<Throwable>());
             }
             long sleepTime = getPauseTime(tries);
             if (LOG.isDebugEnabled()) {
-              LOG.debug("Reloading region " + Bytes.toString(currentRegion) +
+              LOG.debug("Reloading region " + Bytes.toStringBinary(currentRegion) +
                 " location because regionserver didn't accept updates; " +
-                "tries=" + tries +
-                " of max=" + this.numRetries + ", waiting=" + sleepTime + "ms");
+                "tries=" + tries + " of max=" + this.numRetries +
+                ", waiting=" + sleepTime + "ms");
             }
             try {
               Thread.sleep(sleepTime);
@@ -1054,12 +1060,14 @@
             }
             i = i - puts.length + index;
             retryOnlyOne = true;
+            // Reload location.
             location = getRegionLocationForRowWithRetries(tableName, 
               list.get(i + 1).getRow(), true);
             region = location.getRegionInfo().getRegionName();
-          }
-          else {
+          } else {
+            // Reset these flags/counters on successful batch Put
             retryOnlyOne = false;
+            tries = 0;
           }
           currentRegion = region;
           currentPuts.clear();
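
The retry contract exercised above is easier to see in isolation. A minimal sketch, assuming a server whose batch put returns -1 when every Put was applied and otherwise the count of Puts it did apply; all names below are hypothetical, and the real logic lives in processBatchOfRows:

    import java.util.List;

    final class BatchRetrySketch {
      /** Stand-in for the server side of HRegionInterface.put. */
      interface BatchServer {
        int put(List<byte[]> rows); // -1 = all applied, else count applied
      }

      static void submitWithRetries(BatchServer server, List<byte[]> rows,
          int numRetries, long pauseMs) throws InterruptedException {
        int tries = 0;
        while (true) {
          int applied = server.put(rows);
          if (applied == -1) {
            return; // every row in the batch was applied
          }
          if (++tries >= numRetries) {
            throw new RuntimeException("Retries exhausted with " +
                (rows.size() - applied) + " rows unapplied");
          }
          Thread.sleep(pauseMs * tries);             // back off before retrying
          rows = rows.subList(applied, rows.size()); // resubmit only the unapplied tail
        }
      }
    }

In the patch itself a successful per-region batch also resets tries to 0, so the retry budget applies per region rather than across the whole list of Puts.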

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/Result.java Fri Jul 10 22:58:34 2009
@@ -389,6 +389,7 @@
   public void readFields(final DataInput in)
   throws IOException {
     familyMap = null;
+    row = null;
     int numKeys = in.readInt();
     this.kvs = new KeyValue[numKeys];
     if(numKeys == 0) {
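
The one-line fix above matters because MapReduce reuses a single Writable instance across records: readFields() refills kvs, and a row cached by an earlier getRow() call would otherwise leak into the next record (HBASE-1644). A sketch of the lazy-cache pattern with hypothetical names:

    import java.io.DataInput;
    import java.io.IOException;

    class CachingRecord {
      private byte[] data;  // backing bytes, replaced on each readFields
      private byte[] row;   // lazily cached value derived from data

      byte[] getRow() {
        if (row == null) {
          row = extractRow(data); // compute once, then cache
        }
        return row;
      }

      void readFields(DataInput in) throws IOException {
        row = null;                // the fix: drop the stale cache
        data = new byte[in.readInt()];
        in.readFully(data);
      }

      private static byte[] extractRow(byte[] d) { return d; } // stand-in
    }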

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java Fri Jul 10 22:58:34 2009
@@ -47,9 +47,9 @@
     StringBuilder buffer = new StringBuilder("Trying to contact region server ");
     buffer.append(serverName);
     buffer.append(" for region ");
-    buffer.append(regionName == null? "": Bytes.toString(regionName));
+    buffer.append(regionName == null? "": Bytes.toStringBinary(regionName));
     buffer.append(", row '");
-    buffer.append(row == null? "": Bytes.toString(row));
+    buffer.append(row == null? "": Bytes.toStringBinary(row));
     buffer.append("', but failed after ");
     buffer.append(numTries + 1);
     buffer.append(" attempts.\nExceptions:\n");
@@ -59,4 +59,4 @@
     }
     return buffer.toString();
   }
-}
\ No newline at end of file
+}
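
The toString-to-toStringBinary swap here (and in the other files below) is about messages that include rows or region names containing non-printable bytes: Bytes.toStringBinary escapes such bytes instead of mangling them. A small illustration, assuming only the real Bytes utility from this codebase:

    import org.apache.hadoop.hbase.util.Bytes;

    final class PrintableRows {
      public static void main(String[] args) {
        byte[] row = new byte[] {'r', 0x00, (byte) 0xFF};
        // Bytes.toString would render the control/high bytes unreadably;
        // toStringBinary escapes them, printing something like r\x00\xFF.
        System.out.println(Bytes.toStringBinary(row));
      }
    }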

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java Fri Jul 10 22:58:34 2009
@@ -102,7 +102,8 @@
    * 
    * @param regionName
    * @param puts
-   * @return The number of processed put's.
+   * @return The number of processed Puts.  Returns -1 if all Puts were
+   * processed successfully.
    * @throws IOException
    */
   public int put(final byte[] regionName, final Put [] puts)

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/BaseScanner.java Fri Jul 10 22:58:34 2009
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -372,33 +371,6 @@
             " is not valid; " + " Server '" + serverAddress + "' startCode: " +
             startCode + " unknown.");
         }
-
-        // Recover the region server's log if there is one.
-        // This is only done from here if we are restarting and there is stale
-        // data in the meta region. Once we are on-line, dead server log
-        // recovery is handled by lease expiration and ProcessServerShutdown
-        if (!this.master.regionManager.isInitialMetaScanComplete() &&
-            serverName != null) {
-          Path logDir =
-            new Path(this.master.rootdir, HLog.getHLogDirectoryName(serverName));
-          try {
-            if (master.fs.exists(logDir)) {
-              this.master.regionManager.splitLogLock.lock();
-              try {
-                HLog.splitLog(master.rootdir, logDir, master.fs,
-                    master.getConfiguration());
-              } finally {
-                this.master.regionManager.splitLogLock.unlock();
-              }
-            }
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Split " + logDir.toString());
-            }
-          } catch (IOException e) {
-            LOG.warn("unable to split region server log because: ", e);
-            throw e;
-          }
-        }
         // Now get the region assigned
         this.master.regionManager.setUnassigned(info, true);
       }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java Fri Jul 10 22:58:34 2009
@@ -40,6 +40,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.DistributedFileSystem;
 import org.apache.hadoop.dfs.FSConstants;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ClusterStatus;
@@ -73,6 +74,7 @@
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.ipc.HBaseRPC.Server;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
+import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -543,6 +545,7 @@
       // Check if this is a fresh start of the cluster
       if(addresses.size() == 0) {
         LOG.debug("This is a fresh start, proceeding with normal startup");
+        splitLogAfterStartup();
         return;
       }
       LOG.info("This is a failover, ZK inspection begins...");
@@ -579,10 +582,45 @@
       }
       LOG.info("Inspection found " + assignedRegions.size() + " regions, " + 
           (isRootRegionAssigned ? "with -ROOT-" : "but -ROOT- was MIA"));
+      splitLogAfterStartup();
     } catch(IOException ex) {
       ex.printStackTrace();
     }
   }
+  
+  /**
+   * Inspect the log directory to recover any log file without
+   * an active region server.
+   * @throws IOException
+   */
+  private void splitLogAfterStartup() throws IOException {
+    Path logsDirPath =
+      new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
+    FileStatus [] logFolders = this.fs.listStatus(logsDirPath);
+    if (logFolders == null || logFolders.length == 0) {
+      LOG.debug("No log files to split, proceeding...");
+      return;
+    }
+    for (FileStatus status : logFolders) {
+      String serverName = status.getPath().getName();
+      LOG.info("Found log folder : " + serverName);
+      if(this.serverManager.getServerInfo(serverName) == null) {
+        LOG.info("Log folder doesn't belong " +
+            "to a known region server, splitting");
+        this.regionManager.splitLogLock.lock();
+        Path logDir =
+          new Path(this.rootdir, HLog.getHLogDirectoryName(serverName));
+        try {
+          HLog.splitLog(this.rootdir, logDir, this.fs,
+              getConfiguration());
+        } finally {
+          this.regionManager.splitLogLock.unlock();
+        }
+      } else {
+        LOG.info("Log folder belongs to an existing region server");
+      }
+    }
+  }
 
   /*
    * Start up all services. If any of these threads gets an unhandled exception
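
In essence, HBASE-698 moves HLog recovery from the meta scan (removed from BaseScanner above) to master startup: any log directory whose name does not match a live region server is an orphan and gets split. The decision, reduced to a sketch with hypothetical types:

    import java.util.Set;

    final class OrphanLogs {
      // Log directories are named after the region server that wrote them,
      // so a directory with no matching live server has no owner to replay it.
      static boolean isOrphan(String logDirName, Set<String> liveServerNames) {
        return !liveServerNames.contains(logDirName);
      }
    }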

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java Fri Jul 10 22:58:34 2009
@@ -90,7 +90,7 @@
       int qualifierLength, long timestamp) {
 
     // Check against DeleteFamily
-    if(timestamp <= familyStamp) {
+    if (timestamp <= familyStamp) {
       return true;
     }
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri Jul 10 22:58:34 2009
@@ -941,7 +941,7 @@
       // all and sundry.
       this.log.abortCacheFlush();
       DroppedSnapshotException dse = new DroppedSnapshotException("region: " +
-          Bytes.toString(getRegionName()));
+          Bytes.toStringBinary(getRegionName()));
       dse.initCause(t);
       throw dse;
     }
@@ -1383,7 +1383,7 @@
       requestFlush();
       if (!blocked) {
         LOG.info("Blocking updates for '" + Thread.currentThread().getName() +
-          "' on region " + Bytes.toString(getRegionName()) +
+          "' on region " + Bytes.toStringBinary(getRegionName()) +
           ": memstore size " +
           StringUtils.humanReadableInt(this.memstoreSize.get()) +
           " is >= than blocking " +
@@ -1521,9 +1521,9 @@
     if(!rowIsInRange(regionInfo, row)) {
       throw new WrongRegionException("Requested row out of range for " +
           "HRegion " + this + ", startKey='" +
-          Bytes.toString(regionInfo.getStartKey()) + "', getEndKey()='" +
-          Bytes.toString(regionInfo.getEndKey()) + "', row='" +
-          Bytes.toString(row) + "'");
+          Bytes.toStringBinary(regionInfo.getStartKey()) + "', getEndKey()='" +
+          Bytes.toStringBinary(regionInfo.getEndKey()) + "', row='" +
+          Bytes.toStringBinary(row) + "'");
     }
   }
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Fri Jul 10 22:58:34 2009
@@ -1768,6 +1768,7 @@
   
   public int put(final byte[] regionName, final Put [] puts)
   throws IOException {
+    // Count of Puts processed.
     int i = 0;
     checkOpen();
     try {
@@ -1779,13 +1780,15 @@
         locks[i] = getLockFromId(puts[i].getLockId());
         region.put(puts[i], locks[i]);
       }
-    } catch(WrongRegionException ex) {
+    } catch (WrongRegionException ex) {
+      LOG.debug("Batch puts: " + i, ex);
       return i;
     } catch (NotServingRegionException ex) {
       return i;
     } catch (Throwable t) {
       throw convertThrowableToIOE(cleanup(t));
     }
+    // All have been processed successfully.
     return -1;
   }
   

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java Fri Jul 10 22:58:34 2009
@@ -54,8 +54,8 @@
     this.comparator = new KVScannerComparator(comparator);
     this.heap = new PriorityQueue<KeyValueScanner>(scanners.length, 
         this.comparator);
-    for(KeyValueScanner scanner : scanners) {
-      if(scanner.peek() != null) {
+    for (KeyValueScanner scanner : scanners) {
+      if (scanner.peek() != null) {
         this.heap.add(scanner);
       }
     }
@@ -102,7 +102,7 @@
     InternalScanner currentAsInternal = (InternalScanner)this.current;
     currentAsInternal.next(result);
     KeyValue pee = this.current.peek();
-    if(pee == null) {
+    if (pee == null) {
       this.current.close();
     } else {
       this.heap.add(this.current);

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MinorCompactingStoreScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MinorCompactingStoreScanner.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MinorCompactingStoreScanner.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/MinorCompactingStoreScanner.java Fri Jul 10 22:58:34 2009
@@ -41,7 +41,7 @@
   MinorCompactingStoreScanner(Store store,
                               KeyValueScanner [] scanners) {
     comparator = store.comparator;
-    deleteTracker = new ScanDeleteTracker(store.comparator.getRawComparator());
+    deleteTracker = new ScanDeleteTracker();
     KeyValue firstKv = KeyValue.createFirstOnRow(HConstants.EMPTY_START_ROW);
     for (KeyValueScanner scanner : scanners ) {
       scanner.seek(firstKv);
@@ -53,7 +53,7 @@
   MinorCompactingStoreScanner(String cfName, KeyValue.KVComparator comparator,
                               KeyValueScanner [] scanners) {
     this.comparator = comparator;
-    deleteTracker = new ScanDeleteTracker(comparator.getRawComparator());
+    deleteTracker = new ScanDeleteTracker();
 
     KeyValue firstKv = KeyValue.createFirstOnRow(HConstants.EMPTY_START_ROW);
     for (KeyValueScanner scanner : scanners ) {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java Fri Jul 10 22:58:34 2009
@@ -47,14 +47,11 @@
   private byte deleteType = 0;
   private long deleteTimestamp = 0L;
 
-  private KeyValue.KeyComparator comparator;
-  
   /**
    * Constructor for ScanDeleteTracker
-   * @param comparator
    */
-  public ScanDeleteTracker(KeyValue.KeyComparator comparator) {
-    this.comparator = comparator;
+  public ScanDeleteTracker() {
+    super();
   }
   
   /**
@@ -71,15 +68,15 @@
   @Override
   public void add(byte[] buffer, int qualifierOffset, int qualifierLength,
       long timestamp, byte type) {
-    if(timestamp > familyStamp) {
-      if(type == KeyValue.Type.DeleteFamily.getCode()) {
+    if (timestamp > familyStamp) {
+      if (type == KeyValue.Type.DeleteFamily.getCode()) {
         familyStamp = timestamp;
         return;
       }
 
-      if(deleteBuffer != null && type < deleteType) {
+      if (deleteBuffer != null && type < deleteType) {
         // same column, so ignore less specific delete
-        if(Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength,
+        if (Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength,
             buffer, qualifierOffset, qualifierLength) == 0){
           return;
         }
@@ -107,17 +104,16 @@
   @Override
   public boolean isDeleted(byte [] buffer, int qualifierOffset,
       int qualifierLength, long timestamp) {
-    if(timestamp < familyStamp) {
+    if (timestamp < familyStamp) {
       return true;
     }
     
-    if(deleteBuffer != null) {
-      // TODO ryan use a specific comparator
+    if (deleteBuffer != null) {
       int ret = Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength,
           buffer, qualifierOffset, qualifierLength);
 
-      if(ret == 0) {
-        if(deleteType == KeyValue.Type.DeleteColumn.getCode()) {
+      if (ret == 0) {
+        if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
           return true;
         }
         // Delete (aka DeleteVersion)
@@ -158,5 +154,4 @@
   public void update() {
     this.reset();
   }
-
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java Fri Jul 10 22:58:34 2009
@@ -24,7 +24,6 @@
 
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -54,15 +53,14 @@
     this.tr = scan.getTimeRange();
     this.oldestStamp = System.currentTimeMillis() - ttl;
     this.rowComparator = rowComparator;
-    // shouldn't this be ScanDeleteTracker?
-    this.deletes =  new ScanDeleteTracker(rowComparator);
+    this.deletes =  new ScanDeleteTracker();
     this.startKey = KeyValue.createFirstOnRow(scan.getStartRow());
     this.stopKey = KeyValue.createFirstOnRow(scan.getStopRow());
     this.filter = scan.getFilter();
     this.oldFilter = scan.getOldFilter();
     
     // Single branch to deal with two types of reads (columns vs all in family)
-    if(columns == null || columns.size() == 0) {
+    if (columns == null || columns.size() == 0) {
       // use a specialized scan for wildcard column tracker.
       this.columns = new ScanWildcardColumnTracker(maxVersions);
     } else {
@@ -166,7 +164,8 @@
       return MatchCode.SKIP;
     }
 
-    if (deletes.isDeleted(bytes, offset, qualLength, timestamp)) {
+    if (!this.deletes.isEmpty() &&
+        deletes.isDeleted(bytes, offset, qualLength, timestamp)) {
       return MatchCode.SKIP;
     }
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java Fri Jul 10 22:58:34 2009
@@ -65,7 +65,7 @@
 
     // Combine all seeked scanners with a heap
     heap = new KeyValueHeap(
-        scanners.toArray(new KeyValueScanner[scanners.size()]), store.comparator);
+      scanners.toArray(new KeyValueScanner[scanners.size()]), store.comparator);
 
     this.store.addChangedReaderObserver(this);
   }

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java Fri Jul 10 22:58:34 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.server.ServerConfig;
 import org.apache.zookeeper.server.ZooKeeperServerMain;
 import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
@@ -85,6 +86,19 @@
     }
   }
 
+  private static boolean addressIsLocalHost(String address) {
+    return address.equals("localhost") || address.equals("127.0.0.1");
+  }
+
+  private static boolean hostEquals(String addrA, String addrB) {
+    if (addrA.contains(".") && addrB.contains(".")) {
+      return addrA.equals(addrB);
+    }
+    String hostA = StringUtils.simpleHostname(addrA);
+    String hostB = StringUtils.simpleHostname(addrB);
+    return hostA.equals(hostB);
+  }
+
   private static void writeMyID(Properties properties) throws UnknownHostException, IOException {
     HBaseConfiguration conf = new HBaseConfiguration();
     String myAddress = DNS.getDefaultHost(
@@ -101,7 +115,9 @@
         long id = Long.parseLong(key.substring(dot + 1));
         String[] parts = value.split(":");
         String address = parts[0];
-        if (myAddress.equals(address)) {
+        if (addressIsLocalHost(address) || hostEquals(myAddress, address)) {
+          LOG.debug("found my address: " + myAddress + ", in list: " + address +
+                    ", setting myId to " + id);
           myId = id;
           break;
         }
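
The relaxed matching above is what lets a clean checkout with an empty hbase-site.xml start ZooKeeper (HBASE-1639): a quorum entry of localhost always matches, and unqualified versus fully-qualified names of the same host now compare equal. Since the helpers are private, the intended behavior is sketched here as assertions against hypothetical copies of them:

    assert addressIsLocalHost("localhost");                       // literal localhost
    assert addressIsLocalHost("127.0.0.1");                       // loopback address
    assert hostEquals("host1.example.com", "host1.example.com");  // both dotted: exact compare
    assert hostEquals("host1.example.com", "host1");              // mixed: compared as short names
    assert !hostEquals("host1.example.com", "host2.example.com"); // different hosts still differ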

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java?rev=793134&r1=793133&r2=793134&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java Fri Jul 10 22:58:34 2009
@@ -33,7 +33,7 @@
   private byte deleteType = 0;
   
   public void setUp(){
-    sdt = new ScanDeleteTracker(KeyValue.KEY_COMPARATOR);
+    sdt = new ScanDeleteTracker();
   }
   
   public void testDeletedBy_Delete() {