Posted to commits@hbase.apache.org by st...@apache.org on 2010/05/07 21:26:51 UTC

svn commit: r942186 [8/18] - in /hadoop/hbase/trunk: ./ contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/ core/src/main/java/org/apache/hadoop/hbase/ core/src/main/java/org/apache/hadoop/hbase/client/ core/src/main/java/org/apache/h...

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Fri May  7 19:26:45 2010
@@ -126,7 +126,7 @@ public class HRegionServer implements HC
   // plain boolean so we can pass a reference to Chore threads.  Otherwise,
   // Chore threads need to know about the hosting class.
   protected final AtomicBoolean stopRequested = new AtomicBoolean(false);
-  
+
   protected final AtomicBoolean quiesced = new AtomicBoolean(false);
 
   // Go down hard.  Used if file system becomes unavailable and also in
@@ -135,7 +135,7 @@ public class HRegionServer implements HC
 
   // If false, the file system has become unavailable
   protected volatile boolean fsOk;
-  
+
   protected HServerInfo serverInfo;
   protected final Configuration conf;
 
@@ -162,7 +162,7 @@ public class HRegionServer implements HC
   protected final int numRegionsToReport;
 
   private final long maxScannerResultSize;
-  
+
   // Remote HMaster
   private HMasterRegionInterface hbaseMaster;
 
@@ -172,7 +172,7 @@ public class HRegionServer implements HC
 
   // Leases
   private Leases leases;
-  
+
   // Request counter
   private volatile AtomicInteger requestCount = new AtomicInteger();
 
@@ -180,25 +180,25 @@ public class HRegionServer implements HC
   // is name of the webapp and the attribute name used stuffing this instance
   // into web context.
   InfoServer infoServer;
-  
+
   /** region server process name */
   public static final String REGIONSERVER = "regionserver";
-  
+
   /*
    * Space is reserved in HRS constructor and then released when aborting
    * to recover from an OOME. See HBASE-706.  TODO: Make this percentage of the
    * heap or a minimum.
    */
   private final LinkedList<byte[]> reservedSpace = new LinkedList<byte []>();
-  
+
   private RegionServerMetrics metrics;
 
   // Compactions
   CompactSplitThread compactSplitThread;
 
-  // Cache flushing  
+  // Cache flushing
   MemStoreFlusher cacheFlusher;
-  
+
   /* Check for major compactions.
    */
   Chore majorCompactionChecker;
@@ -244,7 +244,7 @@ public class HRegionServer implements HC
     machineName = DNS.getDefaultHost(
         conf.get("hbase.regionserver.dns.interface","default"),
         conf.get("hbase.regionserver.dns.nameserver","default"));
-    String addressStr = machineName + ":" + 
+    String addressStr = machineName + ":" +
       conf.get(REGIONSERVER_PORT, Integer.toString(DEFAULT_REGIONSERVER_PORT));
     // This is not necessarily the address we will run with.  The address we
     // use will be in #serverInfo data member.  For example, we may have been
@@ -259,7 +259,7 @@ public class HRegionServer implements HC
     this.connection = ServerConnectionManager.getConnection(conf);
 
     this.isOnline = false;
-    
+
     // Config'ed params
     this.numRetries =  conf.getInt("hbase.client.retries.number", 2);
     this.threadWakeFrequency = conf.getInt(THREAD_WAKE_FREQUENCY, 10 * 1000);
@@ -268,14 +268,14 @@ public class HRegionServer implements HC
     sleeper = new Sleeper(this.msgInterval, this.stopRequested);
 
     this.maxScannerResultSize = conf.getLong(
-            HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, 
+            HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
             HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
-              
+
     // Task thread to process requests from Master
     this.worker = new Worker();
 
-    this.numRegionsToReport =                                        
-      conf.getInt("hbase.regionserver.numregionstoreport", 10);      
+    this.numRegionsToReport =
+      conf.getInt("hbase.regionserver.numregionstoreport", 10);
 
     this.rpcTimeout = conf.getLong(HBASE_REGIONSERVER_LEASE_PERIOD_KEY, DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD);
 
@@ -294,7 +294,7 @@ public class HRegionServer implements HC
     this.shutdownHDFS.set(true);
 
     // Server to handle client requests
-    this.server = HBaseRPC.getServer(this, address.getBindAddress(), 
+    this.server = HBaseRPC.getServer(this, address.getBindAddress(),
       address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
       false, conf);
     this.server.setErrorHandler(this);
@@ -326,13 +326,13 @@ public class HRegionServer implements HC
 
     // Cache flushing thread.
     this.cacheFlusher = new MemStoreFlusher(conf, this);
-    
+
     // Compaction thread
     this.compactSplitThread = new CompactSplitThread(this);
-    
+
     // Log rolling thread
     this.hlogRoller = new LogRoller(this);
-    
+
     // Background thread to check for major compactions; needed if region
     // has not gotten updates in a while.  Make it run at a lesser frequency.
     int multiplier = this.conf.getInt(THREAD_WAKE_FREQUENCY +
@@ -417,7 +417,7 @@ public class HRegionServer implements HC
 
   /**
    * The HRegionServer sticks in this loop until closed. It repeatedly checks
-   * in with the HMaster, sending heartbeats & reports, and receiving HRegion 
+   * in with the HMaster, sending heartbeats & reports, and receiving HRegion
    * load/unload instructions.
    */
   public void run() {
@@ -510,7 +510,7 @@ public class HRegionServer implements HC
                   } catch (IOException e) {
                     this.abortRequested = true;
                     this.stopRequested.set(true);
-                    e = RemoteExceptionHandler.checkIOException(e); 
+                    e = RemoteExceptionHandler.checkIOException(e);
                     LOG.fatal("error restarting server", e);
                     break;
                   }
@@ -577,7 +577,7 @@ public class HRegionServer implements HC
           }
         }
         now = System.currentTimeMillis();
-        HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), 
+        HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)),
           TimeUnit.MILLISECONDS);
         // If we got something, add it to list of things to send.
         if (msg != null) outboundMessages.add(msg);
@@ -787,16 +787,16 @@ public class HRegionServer implements HC
       stores += r.stores.size();
       for (Store store: r.stores.values()) {
         storefiles += store.getStorefilesCount();
-        storefileSizeMB += 
+        storefileSizeMB +=
           (int)(store.getStorefilesSize()/1024/1024);
-        storefileIndexSizeMB += 
+        storefileIndexSizeMB +=
           (int)(store.getStorefilesIndexSize()/1024/1024);
       }
     }
     return new HServerLoad.RegionLoad(name, stores, storefiles,
       storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB);
   }
- 
+
   /**
    * @param regionName
    * @return An instance of RegionLoad.
@@ -871,12 +871,12 @@ public class HRegionServer implements HC
     }
     return stop;
   }
-  
-  
+
+
   /**
    * Checks to see if the file system is still accessible.
    * If not, sets abortRequested and stopRequested
-   * 
+   *
    * @return false if file system is not available
    */
   protected boolean checkFileSystem() {
@@ -901,7 +901,7 @@ public class HRegionServer implements HC
     private final HRegionServer instance;
     private final Thread mainThread;
     private final AtomicBoolean shutdownHDFS;
-    
+
     /**
      * @param instance
      * @param mainThread
@@ -917,7 +917,7 @@ public class HRegionServer implements HC
     @Override
     public void run() {
       LOG.info("Starting shutdown thread");
-      
+
       // tell the region server to stop
       this.instance.stop();
 
@@ -966,7 +966,7 @@ public class HRegionServer implements HC
   }
 
   /**
-   * Report the status of the server. A server is online once all the startup 
+   * Report the status of the server. A server is online once all the startup
    * is completed (setting up filesystem, starting service threads, etc.). This
    * method is designed mostly to be useful in tests.
    * @return true if online, false if not.
@@ -974,7 +974,7 @@ public class HRegionServer implements HC
   public boolean isOnline() {
     return isOnline;
   }
-    
+
   private HLog setupHLog() throws RegionServerRunningException,
     IOException {
     Path oldLogDir = new Path(rootDir, HREGION_OLDLOGDIR_NAME);
@@ -991,17 +991,17 @@ public class HRegionServer implements HC
     return newlog;
   }
 
-  // instantiate 
+  // instantiate
   protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException {
     HLog newlog = new HLog(fs, logdir, oldLogDir, conf, hlogRoller);
     return newlog;
   }
 
-  
+
   protected LogRoller getLogRoller() {
     return hlogRoller;
-  }  
-  
+  }
+
   /*
    * @param interval Interval since last time metrics were called.
    */
@@ -1030,7 +1030,7 @@ public class HRegionServer implements HC
         synchronized (r.stores) {
           stores += r.stores.size();
           for(Map.Entry<byte [], Store> ee: r.stores.entrySet()) {
-            Store store = ee.getValue(); 
+            Store store = ee.getValue();
             storefiles += store.getStorefilesCount();
             storefileIndexSize += store.getStorefilesIndexSize();
           }
@@ -1091,7 +1091,7 @@ public class HRegionServer implements HC
     Threads.setDaemonThreadRunning(this.workerThread, n + ".worker", handler);
     Threads.setDaemonThreadRunning(this.majorCompactionChecker,
         n + ".majorCompactionChecker", handler);
-    
+
     // Leases is not a Thread. Internally it runs a daemon thread.  If it gets
     // an unhandled exception, it will just exit.
     this.leases.setName(n + ".leaseChecker");
@@ -1121,7 +1121,7 @@ public class HRegionServer implements HC
           // update HRS server info
           this.serverInfo.setInfoPort(port);
         }
-      } 
+      }
     }
 
     // Start Server.  This service is like leases in that it internally runs
@@ -1191,7 +1191,7 @@ public class HRegionServer implements HC
     this.stopRequested.set(true);
     synchronized(this) {
       // Wakes run() if it is sleeping
-      notifyAll(); // FindBugs NN_NAKED_NOTIFY  
+      notifyAll(); // FindBugs NN_NAKED_NOTIFY
     }
   }
 
@@ -1208,7 +1208,7 @@ public class HRegionServer implements HC
     stop();
   }
 
-  /** 
+  /**
    * Wait on all threads to finish.
    * Presumption is that all closes and stops have already been called.
    */
@@ -1276,7 +1276,7 @@ public class HRegionServer implements HC
         if (LOG.isDebugEnabled())
           LOG.debug("sending initial server load: " + hsl);
         lastMsg = System.currentTimeMillis();
-        boolean startCodeOk = false; 
+        boolean startCodeOk = false;
         while(!startCodeOk) {
           serverInfo.setStartCode(System.currentTimeMillis());
           startCodeOk = zooKeeperWrapper.writeRSLocation(this.serverInfo);
@@ -1308,13 +1308,13 @@ public class HRegionServer implements HC
   private void reportClose(final HRegionInfo region, final byte[] message) {
     this.outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_CLOSE, region, message));
   }
-  
+
   /**
    * Add to the outbound message buffer
-   * 
-   * When a region splits, we need to tell the master that there are two new 
+   *
+   * When a region splits, we need to tell the master that there are two new
    * regions that need to be assigned.
-   * 
+   *
    * We do not need to inform the master about the old region, because we've
    * updated the meta or root regions, and the master will pick that up on its
    * next rescan of the root or meta tables.
@@ -1347,7 +1347,7 @@ public class HRegionServer implements HC
   final BlockingQueue<ToDoEntry> toDo = new LinkedBlockingQueue<ToDoEntry>();
   private Worker worker;
   private Thread workerThread;
-  
+
   /** Thread that performs long running requests from the master */
   class Worker implements Runnable {
     void stop() {
@@ -1355,7 +1355,7 @@ public class HRegionServer implements HC
         toDo.notifyAll();
       }
     }
-    
+
     public void run() {
       try {
         while(!stopRequested.get()) {
@@ -1417,12 +1417,12 @@ public class HRegionServer implements HC
                 e.msg.isType(Type.MSG_REGION_MAJOR_COMPACT),
                 e.msg.getType().name());
               break;
-              
+
             case MSG_REGION_FLUSH:
               region = getRegion(info.getRegionName());
               region.flushcache();
               break;
-              
+
             case TESTING_MSG_BLOCK_RS:
               while (!stopRequested.get()) {
                 Threads.sleep(1000);
@@ -1500,9 +1500,9 @@ public class HRegionServer implements HC
         this.lock.writeLock().unlock();
       }
     }
-    reportOpen(regionInfo); 
+    reportOpen(regionInfo);
   }
-  
+
   protected HRegion instantiateRegion(final HRegionInfo regionInfo)
       throws IOException {
     HRegion r = new HRegion(HTableDescriptor.getTableDir(rootDir, regionInfo
@@ -1513,9 +1513,9 @@ public class HRegionServer implements HC
         addProcessingMessage(regionInfo);
       }
     });
-    return r; 
+    return r;
   }
-  
+
   /**
    * Add a MSG_REPORT_PROCESS_OPEN to the outbound queue.
   * This method is called while the region is in the queue of regions to process
@@ -1569,7 +1569,7 @@ public class HRegionServer implements HC
     }
     return regionsToClose;
   }
-  
+
   /*
    * Thread to run close of a region.
    */
@@ -1580,7 +1580,7 @@ public class HRegionServer implements HC
       super(Thread.currentThread().getName() + ".regionCloser." + r.toString());
       this.r = r;
     }
-    
+
     @Override
     public void run() {
       try {
@@ -1652,7 +1652,7 @@ public class HRegionServer implements HC
   }
 
 
-  public Result getClosestRowBefore(final byte [] regionName, 
+  public Result getClosestRowBefore(final byte [] regionName,
     final byte [] row, final byte [] family)
   throws IOException {
     checkOpen();
@@ -1660,8 +1660,8 @@ public class HRegionServer implements HC
     try {
       // locate the region we're operating on
       HRegion region = getRegion(regionName);
-      // ask the region for all the data 
-      
+      // ask the region for all the data
+
       Result r = region.getClosestRowBefore(row, family);
       return r;
     } catch (Throwable t) {
@@ -1697,7 +1697,7 @@ public class HRegionServer implements HC
   throws IOException {
     if (put.getRow() == null)
       throw new IllegalArgumentException("update has null row");
-    
+
     checkOpen();
     this.requestCount.incrementAndGet();
     HRegion region = getRegion(regionName);
@@ -1745,7 +1745,7 @@ public class HRegionServer implements HC
   }
 
   /**
-   * 
+   *
    * @param regionName
    * @param row
    * @param family
@@ -1756,12 +1756,12 @@ public class HRegionServer implements HC
   * @return true if the new put was executed, false otherwise
    */
   public boolean checkAndPut(final byte[] regionName, final byte [] row,
-      final byte [] family, final byte [] qualifier, final byte [] value, 
+      final byte [] family, final byte [] qualifier, final byte [] value,
       final Put put) throws IOException{
     //Getting actual value
     Get get = new Get(row);
     get.addColumn(family, qualifier);
-    
+
     checkOpen();
     this.requestCount.incrementAndGet();
     HRegion region = getRegion(regionName);
@@ -1776,7 +1776,7 @@ public class HRegionServer implements HC
       throw convertThrowableToIOE(cleanup(t));
     }
   }
-  
+
   //
   // remote scanner interface
   //
@@ -1801,7 +1801,7 @@ public class HRegionServer implements HC
       throw convertThrowableToIOE(cleanup(t, "Failed openScanner"));
     }
   }
-  
+
   protected long addScanner(InternalScanner s) throws LeaseStillHeldException {
     long scannerId = -1L;
     scannerId = rand.nextLong();
@@ -1830,7 +1830,7 @@ public class HRegionServer implements HC
       try {
         checkOpen();
       } catch (IOException e) {
-        // If checkOpen failed, server not running or filesystem gone, 
+        // If checkOpen failed, server not running or filesystem gone,
         // cancel this lease; filesystem is gone or we're closing or something.
         this.leases.cancelLease(scannerName);
         throw e;
@@ -1869,7 +1869,7 @@ public class HRegionServer implements HC
       }
       throw convertThrowableToIOE(cleanup(t));
     }
-  } 
+  }
 
   public void close(final long scannerId) throws IOException {
     try {
@@ -1886,17 +1886,17 @@ public class HRegionServer implements HC
     }
   }
 
-  /** 
+  /**
    * Instantiated as a scanner lease.
    * If the lease times out, the scanner is closed
    */
   private class ScannerListener implements LeaseListener {
     private final String scannerName;
-    
+
     ScannerListener(final String n) {
       this.scannerName = n;
     }
-    
+
     public void leaseExpired() {
       LOG.info("Scanner " + this.scannerName + " lease expired");
       InternalScanner s = scanners.remove(this.scannerName);
@@ -1909,7 +1909,7 @@ public class HRegionServer implements HC
       }
     }
   }
-  
+
   //
   // Methods that do the actual work for the remote API
   //
@@ -2076,7 +2076,7 @@ public class HRegionServer implements HC
   public InfoServer getInfoServer() {
     return infoServer;
   }
-  
+
   /**
    * @return true if a stop has been requested.
    */
@@ -2085,7 +2085,7 @@ public class HRegionServer implements HC
   }
 
   /**
-   * 
+   *
    * @return the configuration
    */
   public Configuration getConfiguration() {
@@ -2107,7 +2107,7 @@ public class HRegionServer implements HC
   public HRegion [] getOnlineRegionsAsArray() {
     return getOnlineRegions().toArray(new HRegion[0]);
   }
-  
+
   /**
    * @return The HRegionInfos from online regions sorted
    */
@@ -2120,10 +2120,10 @@ public class HRegionServer implements HC
     }
     return result;
   }
-  
+
   /**
-   * This method removes HRegion corresponding to hri from the Map of onlineRegions.  
-   * 
+   * This method removes HRegion corresponding to hri from the Map of onlineRegions.
+   *
    * @param hri the HRegionInfo corresponding to the HRegion to-be-removed.
    * @return the removed HRegion, or null if the HRegion was not in onlineRegions.
    */
@@ -2158,7 +2158,7 @@ public class HRegionServer implements HC
     }
     return sortedRegions;
   }
-  
+
   /**
    * @param regionName
    * @return HRegion for the passed <code>regionName</code> or null if named
@@ -2177,8 +2177,8 @@ public class HRegionServer implements HC
   public FlushRequester getFlushRequester() {
     return this.cacheFlusher;
   }
-  
-  /** 
+
+  /**
    * Protected utility method for safely obtaining an HRegion handle.
    * @param regionName Name of online {@link HRegion} to return
    * @return {@link HRegion} for <code>regionName</code>
@@ -2221,10 +2221,10 @@ public class HRegionServer implements HC
     }
     return regions.toArray(new HRegionInfo[regions.size()]);
   }
-  
-  /**  
+
+  /**
    * Called to verify that this server is up and running.
-   * 
+   *
    * @throws IOException
    */
   protected void checkOpen() throws IOException {
@@ -2236,14 +2236,14 @@ public class HRegionServer implements HC
       throw new IOException("File system not available");
     }
   }
- 
+
   /**
    * @return Returns list of non-closed regions hosted on this server.  If no
    * regions to check, returns an empty list.
    */
   protected Set<HRegion> getRegionsToCheck() {
     HashSet<HRegion> regionsToCheck = new HashSet<HRegion>();
-    //TODO: is this locking necessary? 
+    //TODO: is this locking necessary?
     lock.readLock().lock();
     try {
       regionsToCheck.addAll(this.onlineRegions.values());
@@ -2260,9 +2260,9 @@ public class HRegionServer implements HC
     return regionsToCheck;
   }
 
-  public long getProtocolVersion(final String protocol, 
+  public long getProtocolVersion(final String protocol,
       final long clientVersion)
-  throws IOException {  
+  throws IOException {
     if (protocol.equals(HRegionInterface.class.getName())) {
       return HBaseRPCProtocolVersion.versionID;
     }
@@ -2317,21 +2317,21 @@ public class HRegionServer implements HC
   public HServerInfo getServerInfo() { return this.serverInfo; }
 
   /** {@inheritDoc} */
-  public long incrementColumnValue(byte [] regionName, byte [] row, 
+  public long incrementColumnValue(byte [] regionName, byte [] row,
       byte [] family, byte [] qualifier, long amount, boolean writeToWAL)
   throws IOException {
     checkOpen();
 
     if (regionName == null) {
-      throw new IOException("Invalid arguments to incrementColumnValue " + 
+      throw new IOException("Invalid arguments to incrementColumnValue " +
       "regionName is null");
     }
     requestCount.incrementAndGet();
     try {
       HRegion region = getRegion(regionName);
-      long retval = region.incrementColumnValue(row, family, qualifier, amount, 
+      long retval = region.incrementColumnValue(row, family, qualifier, amount,
           writeToWAL);
-      
+
       return retval;
     } catch (IOException e) {
       checkFileSystem();
@@ -2348,7 +2348,7 @@ public class HRegionServer implements HC
     }
     return regions;
   }
-  
+
   /** {@inheritDoc} */
   public HServerInfo getHServerInfo() throws IOException {
     return serverInfo;
@@ -2384,7 +2384,7 @@ public class HRegionServer implements HC
   //
   // Main program and support routines
   //
-  
+
   /**
    * @param hrs
    * @return Thread the RegionServer is running in correctly named.
@@ -2410,7 +2410,7 @@ public class HRegionServer implements HC
   private static void printUsageAndExit() {
     printUsageAndExit(null);
   }
-  
+
   private static void printUsageAndExit(final String message) {
     if (message != null) {
       System.err.println(message);
@@ -2448,7 +2448,7 @@ public class HRegionServer implements HC
       printUsageAndExit();
     }
     Configuration conf = HBaseConfiguration.create();
-    
+
     // Process command-line args. TODO: Better cmd-line processing
     // (but hopefully something not as painful as cli options).
     for (String cmd: args) {
@@ -2473,13 +2473,13 @@ public class HRegionServer implements HC
         }
         break;
       }
-      
+
       if (cmd.equals("stop")) {
         printUsageAndExit("To shutdown the regionserver run " +
           "bin/hbase-daemon.sh stop regionserver or send a kill signal to" +
           "the regionserver pid");
       }
-      
+
       // Print out usage if we get to here.
       printUsageAndExit();
     }
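
A note on the checkAndPut() path touched above: it issues a Get for the cell's current value and applies the Put only when that value equals the expected bytes. A self-contained sketch of that compare-then-write contract, using a plain map and hypothetical names (the real method runs server-side, under the region's row lock):

  import java.util.Arrays;
  import java.util.HashMap;
  import java.util.Map;

  // Compare-then-write: apply the update only if the stored value still
  // equals the expected value. 'synchronized' stands in for the row lock
  // the region server takes in the real implementation.
  public class CheckAndPutSketch {
    private final Map<String, byte[]> cells = new HashMap<String, byte[]>();

    public synchronized boolean checkAndPut(String row, byte[] expected, byte[] update) {
      byte[] current = cells.get(row);
      if (!Arrays.equals(current, expected)) {
        return false;          // value changed underneath the caller
      }
      cells.put(row, update);  // check passed, apply the write
      return true;
    }
  }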

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java Fri May  7 19:26:45 2010
@@ -27,10 +27,10 @@ import java.util.List;
 
 /**
  * Internal scanners differ from client-side scanners in that they operate on
- * HStoreKeys and byte[] instead of RowResults. This is because they are 
+ * HStoreKeys and byte[] instead of RowResults. This is because they are
  * actually close to how the data is physically stored, and therefore it is more
- * convenient to interact with them that way. It is also much easier to merge 
- * the results across SortedMaps than RowResults. 
+ * convenient to interact with them that way. It is also much easier to merge
+ * the results across SortedMaps than RowResults.
  *
  * <p>Additionally, we need to be able to determine if the scanner is doing
  * wildcard column matches (when only a column family is specified or if a
@@ -50,7 +50,7 @@ public interface InternalScanner extends
 
   /**
    * Grab the next row's worth of values with a limit on the number of values
-   * to return. 
+   * to return.
    * @param result return output array
    * @param limit limit on row count to get
    * @return true if more rows exist after this one, false if scanner is done
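
A hypothetical consumer of the contract above: each next(List) call fills the caller's list with one row's worth of values and returns true while more rows remain. process() is a placeholder, not HBase API:

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hbase.KeyValue;

  // Drains an InternalScanner row by row, reusing one list as the buffer.
  public class ScannerDrain {
    static void drain(InternalScanner scanner) throws IOException {
      List<KeyValue> row = new ArrayList<KeyValue>();
      boolean more = true;
      while (more) {
        row.clear();               // reuse the buffer between rows
        more = scanner.next(row);  // one row's worth of KeyValues per call
        process(row);
      }
      scanner.close();
    }

    static void process(List<KeyValue> row) {
      // placeholder: consume the row's values
    }
  }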

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java Fri May  7 19:26:45 2010
@@ -46,14 +46,14 @@ public class KeyValueHeap implements Key
   private KVScannerComparator comparator;
 
   /**
-   * Constructor.  This KeyValueHeap will handle closing of passed in 
+   * Constructor.  This KeyValueHeap will handle closing of passed in
    * KeyValueScanners.
    * @param scanners
    * @param comparator
    */
   public KeyValueHeap(KeyValueScanner [] scanners, KVComparator comparator) {
     this.comparator = new KVScannerComparator(comparator);
-    this.heap = new PriorityQueue<KeyValueScanner>(scanners.length, 
+    this.heap = new PriorityQueue<KeyValueScanner>(scanners.length,
         this.comparator);
     for (KeyValueScanner scanner : scanners) {
       if (scanner.peek() != null) {
@@ -64,14 +64,14 @@ public class KeyValueHeap implements Key
     }
     this.current = heap.poll();
   }
-  
+
   public KeyValue peek() {
     if(this.current == null) {
       return null;
     }
     return this.current.peek();
   }
-  
+
   public KeyValue next()  {
     if(this.current == null) {
       return null;
@@ -101,7 +101,7 @@ public class KeyValueHeap implements Key
    * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}).
    * @param result
    * @param limit
-   * @return true if there are more keys, false if all scanners are done 
+   * @return true if there are more keys, false if all scanners are done
    */
   public boolean next(List<KeyValue> result, int limit) throws IOException {
     InternalScanner currentAsInternal = (InternalScanner)this.current;
@@ -124,7 +124,7 @@ public class KeyValueHeap implements Key
    * This can ONLY be called when you are using Scanners that implement
    * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}).
    * @param result
-   * @return true if there are more keys, false if all scanners are done 
+   * @return true if there are more keys, false if all scanners are done
    */
   public boolean next(List<KeyValue> result) throws IOException {
     return next(result, -1);
@@ -168,9 +168,9 @@ public class KeyValueHeap implements Key
       scanner.close();
     }
   }
-  
+
   /**
-   * Seeks all scanners at or below the specified seek key.  If we earlied-out 
+   * Seeks all scanners at or below the specified seek key.  If we earlied-out
    * of a row, we may end up skipping values that were never reached yet.
    * Rather than iterating down, we want to give the opportunity to re-seek.
    * <p>
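
KeyValueHeap's merge above keeps one entry per scanner in a PriorityQueue ordered by each scanner's current KeyValue: poll the smallest, emit it, advance that scanner, re-insert it. The same idea over plain sorted int lists, as a standalone sketch (iterators stand in for KeyValueScanners, natural ordering for KVScannerComparator):

  import java.util.ArrayList;
  import java.util.Iterator;
  import java.util.List;
  import java.util.PriorityQueue;

  // K-way merge of sorted sources via a heap of per-source cursors.
  public class HeapMergeSketch {
    static final class Cursor implements Comparable<Cursor> {
      final Iterator<Integer> it;
      Integer head;
      Cursor(Iterator<Integer> it) { this.it = it; head = it.hasNext() ? it.next() : null; }
      public int compareTo(Cursor o) { return head.compareTo(o.head); }
    }

    public static List<Integer> merge(List<List<Integer>> sortedSources) {
      PriorityQueue<Cursor> heap = new PriorityQueue<Cursor>();
      for (List<Integer> src : sortedSources) {
        Cursor c = new Cursor(src.iterator());
        if (c.head != null) heap.add(c);   // seed only non-empty sources
      }
      List<Integer> out = new ArrayList<Integer>();
      while (!heap.isEmpty()) {
        Cursor c = heap.poll();            // cursor with the smallest head
        out.add(c.head);
        c.head = c.it.hasNext() ? c.it.next() : null;
        if (c.head != null) heap.add(c);   // re-heap to restore order
      }
      return out;
    }
  }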

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java Fri May  7 19:26:45 2010
@@ -30,20 +30,20 @@ public interface KeyValueScanner {
    * @return the next KeyValue
    */
   public KeyValue peek();
-  
+
   /**
-   * Return the next KeyValue in this scanner, iterating the scanner 
+   * Return the next KeyValue in this scanner, iterating the scanner
    * @return the next KeyValue
    */
   public KeyValue next();
-  
+
   /**
    * Seek the scanner at or after the specified KeyValue.
    * @param key seek value
    * @return true if scanner has values left, false if end of scanner
    */
   public boolean seek(KeyValue key);
-  
+
   /**
    * Close the KeyValue scanner.
    */
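
The interface above is a three-method cursor: peek() shows the current KeyValue without advancing, next() returns it and advances, and seek() positions at or after a key. A toy in-memory version over a sorted long[] to make those semantics concrete (types simplified; illustrative only):

  // Cursor over a sorted array with peek/next/seek semantics.
  public class ArrayScannerSketch {
    private final long[] sorted;
    private int pos = 0;

    public ArrayScannerSketch(long[] sorted) { this.sorted = sorted; }

    public Long peek() {                   // current element, no advance
      return pos < sorted.length ? sorted[pos] : null;
    }

    public Long next() {                   // current element, then advance
      return pos < sorted.length ? sorted[pos++] : null;
    }

    public boolean seek(long key) {        // position at first element >= key
      while (pos < sorted.length && sorted[pos] < key) pos++;
      return pos < sorted.length;          // false once the scanner is exhausted
    }
  }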

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java Fri May  7 19:26:45 2010
@@ -61,7 +61,7 @@ class KeyValueSkipListSet implements Nav
    */
   static class MapEntryIterator implements Iterator<KeyValue> {
     private final Iterator<Map.Entry<KeyValue, KeyValue>> iterator;
-    
+
     MapEntryIterator(final Iterator<Map.Entry<KeyValue, KeyValue>> i) {
       this.iterator = i;
     }

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java Fri May  7 19:26:45 2010
@@ -32,13 +32,13 @@ import java.util.concurrent.locks.Reentr
 
 /**
  * Runs periodically to determine if the HLog should be rolled.
- * 
+ *
  * NOTE: This class extends Thread rather than Chore because the sleep time
  * can be interrupted when there is something to do, rather than the Chore
  * sleep time which is invariant.
  */
 class LogRoller extends Thread implements LogRollListener {
-  static final Log LOG = LogFactory.getLog(LogRoller.class);  
+  static final Log LOG = LogFactory.getLog(LogRoller.class);
   private final ReentrantLock rollLock = new ReentrantLock();
   private final AtomicBoolean rollLog = new AtomicBoolean(false);
   private final HRegionServer server;
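
The class comment above explains why LogRoller extends Thread rather than Chore: its sleep can be cut short the moment a roll is requested. A generic sketch of that wake-early pattern, with hypothetical names (the rollLog and rollLock fields above suggest LogRoller waits on a flag in much the same way):

  import java.util.concurrent.atomic.AtomicBoolean;

  // Worker that sleeps up to a period but can be woken immediately,
  // unlike a fixed-period chore.
  public class WakeableWorker extends Thread {
    private final AtomicBoolean pending = new AtomicBoolean(false);
    private volatile boolean stopped = false;

    public void requestWork() {
      synchronized (pending) {
        pending.set(true);
        pending.notifyAll();           // end the worker's sleep early
      }
    }

    @Override
    public void run() {
      while (!stopped) {
        synchronized (pending) {
          if (!pending.get()) {
            try {
              pending.wait(60 * 1000); // periodic wake-up as a backstop
            } catch (InterruptedException e) {
              return;
            }
          }
        }
        if (pending.compareAndSet(true, false)) {
          doWork();                    // hypothetical unit of work, e.g. one roll
        }
      }
    }

    private void doWork() { /* placeholder */ }
  }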

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java Fri May  7 19:26:45 2010
@@ -36,8 +36,8 @@ import java.util.Set;
  * The LruHashMap is a memory-aware HashMap with a configurable maximum
  * memory footprint.
  * <p>
- * It maintains an ordered list of all entries in the map ordered by 
- * access time.  When space needs to be freed becase the maximum has been 
+ * It maintains an ordered list of all entries in the map ordered by
+ * access time.  When space needs to be freed because the maximum has been
  * reached, or the application has asked to free memory, entries will be
  * evicted according to an LRU (least-recently-used) algorithm.  That is,
  * those entries which have not been accessed the longest will be evicted
@@ -52,8 +52,8 @@ public class LruHashMap<K extends HeapSi
 implements HeapSize, Map<K,V> {
 
   static final Log LOG = LogFactory.getLog(LruHashMap.class);
-  
-  /** The default size (in bytes) of the LRU */  
+
+  /** The default size (in bytes) of the LRU */
   private static final long DEFAULT_MAX_MEM_USAGE = 50000;
   /** The default capacity of the hash table */
   private static final int DEFAULT_INITIAL_CAPACITY = 16;
@@ -61,12 +61,12 @@ implements HeapSize, Map<K,V> {
   private static final int MAXIMUM_CAPACITY = 1 << 30;
   /** The default load factor to use */
   private static final float DEFAULT_LOAD_FACTOR = 0.75f;
-  
+
   /** Memory overhead of this Object (for HeapSize) */
-  private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG + 
-    2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE + 
+  private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG +
+    2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE +
     1 * ClassSize.ARRAY;
-  
+
   /** Load factor allowed (usually 75%) */
   private final float loadFactor;
   /** Number of key/vals in the map */
@@ -85,7 +85,7 @@ implements HeapSize, Map<K,V> {
   private long memTotal = 0;
   /** Amount of available memory */
   private long memFree = 0;
-  
+
   /** Number of successful (found) get() calls */
   private long hitCount = 0;
   /** Number of unsuccessful (not found) get() calls */
@@ -120,7 +120,7 @@ implements HeapSize, Map<K,V> {
       throw new IllegalArgumentException("Max memory usage too small to " +
       "support base overhead");
     }
-  
+
     /** Find a power of 2 >= initialCapacity */
     int capacity = calculateCapacity(initialCapacity);
     this.loadFactor = loadFactor;
@@ -145,7 +145,7 @@ implements HeapSize, Map<K,V> {
   public LruHashMap(int initialCapacity, float loadFactor) {
     this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE);
   }
-  
+
   /**
    * Constructs a new, empty map with the specified initial capacity and
    * with the default load factor and maximum memory usage.
@@ -173,14 +173,14 @@ implements HeapSize, Map<K,V> {
   }
 
   /**
-   * Constructs a new, empty map with the default initial capacity, 
+   * Constructs a new, empty map with the default initial capacity,
    * load factor and maximum memory usage.
    */
   public LruHashMap() {
     this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR,
     DEFAULT_MAX_MEM_USAGE);
   }
-  
+
   //--------------------------------------------------------------------------
   /**
    * Get the currently available memory for this LRU in bytes.
@@ -191,7 +191,7 @@ implements HeapSize, Map<K,V> {
   public long getMemFree() {
     return memFree;
   }
-  
+
   /**
    * Get the maximum memory allowed for this LRU in bytes.
    *
@@ -200,7 +200,7 @@ implements HeapSize, Map<K,V> {
   public long getMemMax() {
     return memTotal;
   }
-  
+
   /**
    * Get the currently used memory for this LRU in bytes.
    *
@@ -209,7 +209,7 @@ implements HeapSize, Map<K,V> {
   public long getMemUsed() {
     return (memTotal - memFree); // FindBugs IS2_INCONSISTENT_SYNC
   }
-  
+
   /**
    * Get the number of hits to the map.  This is the number of times
    * a call to get() returns a matched key.
@@ -219,7 +219,7 @@ implements HeapSize, Map<K,V> {
   public long getHitCount() {
     return hitCount;
   }
-  
+
   /**
    * Get the number of misses to the map.  This is the number of times
    * a call to get() returns null.
@@ -229,7 +229,7 @@ implements HeapSize, Map<K,V> {
   public long getMissCount() {
     return missCount; // FindBugs IS2_INCONSISTENT_SYNC
   }
-  
+
   /**
    * Get the hit ratio.  This is the number of hits divided by the
    * total number of requests.
@@ -240,7 +240,7 @@ implements HeapSize, Map<K,V> {
     return (double)((double)hitCount/
       ((double)(hitCount+missCount)));
   }
-  
+
   /**
    * Free the requested amount of memory from the LRU map.
    *
@@ -261,7 +261,7 @@ implements HeapSize, Map<K,V> {
     }
     return freedMemory;
   }
-  
+
   /**
    * The total memory usage of this map
    *
@@ -270,7 +270,7 @@ implements HeapSize, Map<K,V> {
   public long heapSize() {
     return (memTotal - memFree);
   }
-  
+
   //--------------------------------------------------------------------------
   /**
    * Retrieves the value associated with the specified key.
@@ -286,7 +286,7 @@ implements HeapSize, Map<K,V> {
     checkKey((K)key);
     int hash = hash(key);
     int i = hashIndex(hash, entries.length);
-    Entry<K,V> e = entries[i]; 
+    Entry<K,V> e = entries[i];
     while (true) {
       if (e == null) {
         missCount++;
@@ -313,7 +313,7 @@ implements HeapSize, Map<K,V> {
    * @param key the key
    * @param value the value
    * @return the value that was previously mapped to this key, null if none
-   * @throws UnsupportedOperationException if either objects do not 
+   * @throws UnsupportedOperationException if either object does not
    * implement HeapSize
    * @throws NullPointerException if the key or value is null
    */
@@ -322,7 +322,7 @@ implements HeapSize, Map<K,V> {
     checkValue(value);
     int hash = hash(key);
     int i = hashIndex(hash, entries.length);
-    
+
     // For old values
     for (Entry<K,V> e = entries[i]; e != null; e = e.next) {
       if (e.hash == hash && isEqual(key, e.key)) {
@@ -338,7 +338,7 @@ implements HeapSize, Map<K,V> {
     checkAndFreeMemory(memChange);
     return null;
   }
-  
+
   /**
    * Deletes the mapping for the specified key if it exists.
    *
@@ -381,7 +381,7 @@ implements HeapSize, Map<K,V> {
   public synchronized void clear() {
     memFree += clearAll();
   }
-  
+
   //--------------------------------------------------------------------------
   /**
    * Checks whether there is a value in the map for the specified key.
@@ -396,9 +396,9 @@ implements HeapSize, Map<K,V> {
     checkKey((K)key);
     int hash = hash(key);
     int i = hashIndex(hash, entries.length);
-    Entry e = entries[i]; 
+    Entry e = entries[i];
     while (e != null) {
-      if (e.hash == hash && isEqual(key, e.key)) 
+      if (e.hash == hash && isEqual(key, e.key))
           return true;
       e = e.next;
     }
@@ -407,7 +407,7 @@ implements HeapSize, Map<K,V> {
 
   /**
    * Checks whether this is a mapping which contains the specified value.
-   * 
+   *
    * Does not affect the LRU.  This is an inefficient operation.
    *
    * @param value the value to check
@@ -443,7 +443,7 @@ implements HeapSize, Map<K,V> {
       throw new NullPointerException("null keys are not allowed");
     }
   }
-  
+
   /**
    * Enforces value constraints.  Null values are not permitted and value must
    * implement HeapSize.  It should not be necessary to verify the second
@@ -461,7 +461,7 @@ implements HeapSize, Map<K,V> {
       throw new NullPointerException("null values are not allowed");
     }
   }
-  
+
   /**
    * Returns the minimum memory usage of the base map structure.
    *
@@ -470,7 +470,7 @@ implements HeapSize, Map<K,V> {
   private long getMinimumUsage() {
     return OVERHEAD + (entries.length * ClassSize.REFERENCE);
   }
-  
+
   //--------------------------------------------------------------------------
   /**
    * Evicts and frees based on LRU until at least as much memory as requested
@@ -497,7 +497,7 @@ implements HeapSize, Map<K,V> {
     removeEntry(headPtr);
     return freed;
   }
-  
+
   /**
    * Moves the specified entry to the most recently used slot of the
    * LRU.  This is called whenever an entry is fetched.
@@ -543,10 +543,10 @@ implements HeapSize, Map<K,V> {
           } else {
             prev.next = next;
           }
-          
+
           Entry<K,V> prevPtr = e.getPrevPtr();
           Entry<K,V> nextPtr = e.getNextPtr();
-          
+
           if(prevPtr != null && nextPtr != null) {
             prevPtr.setNextPtr(nextPtr);
             nextPtr.setPrevPtr(prevPtr);
@@ -557,7 +557,7 @@ implements HeapSize, Map<K,V> {
             headPtr = nextPtr;
             nextPtr.setPrevPtr(null);
           }
-          
+
           return;
       }
       prev = e;
@@ -587,7 +587,7 @@ implements HeapSize, Map<K,V> {
           } else {
             prev.next = next;
           }
-          
+
           // Updating LRU
           Entry<K,V> prevPtr = e.getPrevPtr();
           Entry<K,V> nextPtr = e.getNextPtr();
@@ -601,7 +601,7 @@ implements HeapSize, Map<K,V> {
             headPtr = nextPtr;
             nextPtr.setPrevPtr(null);
           }
-          
+
           return e;
       }
       prev = e;
@@ -668,7 +668,7 @@ implements HeapSize, Map<K,V> {
     size = 0;
     return freedMemory;
   }
-  
+
   //--------------------------------------------------------------------------
   /**
    * Recreates the entire contents of the hashmap into a new array
@@ -680,7 +680,7 @@ implements HeapSize, Map<K,V> {
   private void growTable(int newCapacity) {
     Entry [] oldTable = entries;
     int oldCapacity = oldTable.length;
-    
+
     // Do not allow growing the table beyond the max capacity
     if (oldCapacity == MAXIMUM_CAPACITY) {
       threshold = Integer.MAX_VALUE;
@@ -689,12 +689,12 @@ implements HeapSize, Map<K,V> {
 
     // Determine how much additional space will be required to grow the array
     long requiredSpace = (newCapacity - oldCapacity) * ClassSize.REFERENCE;
-    
+
     // Verify/enforce we have sufficient memory to grow
     checkAndFreeMemory(requiredSpace);
 
     Entry [] newTable = new Entry[newCapacity];
-    
+
     // Transfer existing entries to new hash table
     for(int i=0; i < oldCapacity; i++) {
       Entry<K,V> entry = oldTable[i];
@@ -731,7 +731,7 @@ implements HeapSize, Map<K,V> {
     h ^=  (h >>> 10);
     return h;
   }
-  
+
   /**
    * Compares two objects for equality.  Method uses equals method and
    * assumes neither value is null.
@@ -743,7 +743,7 @@ implements HeapSize, Map<K,V> {
   private boolean isEqual(Object x, Object y) {
     return (x == y || x.equals(y));
   }
-  
+
   /**
    * Determines the index into the current hash table for the specified
    * hashValue.
@@ -778,7 +778,7 @@ implements HeapSize, Map<K,V> {
     }
     return newCapacity;
   }
-  
+
   /**
    * Calculates the threshold of the map given the capacity and load
    * factor.  Once the number of entries in the map grows to the
@@ -799,7 +799,7 @@ implements HeapSize, Map<K,V> {
     memFree -= OVERHEAD;
     memFree -= (entries.length * ClassSize.REFERENCE);
   }
-  
+
   //--------------------------------------------------------------------------
   /**
    * Debugging function that returns a List sorted by access time.
@@ -833,7 +833,7 @@ implements HeapSize, Map<K,V> {
     }
     return entrySet;
   }
-  
+
   /**
    * Get the head of the linked list (least recently used).
    *
@@ -842,16 +842,16 @@ implements HeapSize, Map<K,V> {
   public Entry getHeadPtr() {
     return headPtr;
   }
-  
+
   /**
    * Get the tail of the linked list (most recently used).
-   * 
+   *
    * @return tail of linked list
    */
   public Entry getTailPtr() {
     return tailPtr;
   }
-  
+
   //--------------------------------------------------------------------------
   /**
    * To best optimize this class, some of the methods that are part of a
@@ -860,7 +860,7 @@ implements HeapSize, Map<K,V> {
    * significant overhead and code complexity to support and are
    * unnecessary for the requirements of this class.
    */
-  
+
   /**
    * Intentionally unimplemented.
    */
@@ -884,7 +884,7 @@ implements HeapSize, Map<K,V> {
     throw new UnsupportedOperationException(
     "hashCode(Object) is intentionally unimplemented");
   }
-  
+
   /**
    * Intentionally unimplemented.
    */
@@ -892,7 +892,7 @@ implements HeapSize, Map<K,V> {
     throw new UnsupportedOperationException(
     "keySet() is intentionally unimplemented");
   }
-  
+
   /**
    * Intentionally unimplemented.
    */
@@ -900,7 +900,7 @@ implements HeapSize, Map<K,V> {
     throw new UnsupportedOperationException(
     "putAll() is intentionally unimplemented");
   }
-  
+
   /**
    * Intentionally unimplemented.
    */
@@ -922,9 +922,9 @@ implements HeapSize, Map<K,V> {
   protected static class Entry<K extends HeapSize, V extends HeapSize>
   implements Map.Entry<K,V>, HeapSize {
     /** The baseline overhead memory usage of this class */
-    static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG + 
+    static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG +
       5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT;
-    
+
     /** The key */
     protected final K key;
     /** The value */
@@ -933,12 +933,12 @@ implements HeapSize, Map<K,V> {
     protected final int hash;
     /** The next entry in the hash chain (for collisions) */
     protected Entry<K,V> next;
-    
+
     /** The previous entry in the LRU list (towards LRU) */
     protected Entry<K,V> prevPtr;
     /** The next entry in the LRU list (towards MRU) */
     protected Entry<K,V> nextPtr;
-    
+
     /** The precomputed heap size of this entry */
     protected long heapSize;
 
@@ -979,7 +979,7 @@ implements HeapSize, Map<K,V> {
     public V getValue() {
       return value;
     }
-  
+
     /**
      * Set the value of this entry.
      *
@@ -995,7 +995,7 @@ implements HeapSize, Map<K,V> {
       value = newValue;
       return oldValue;
     }
-  
+
     /**
      * Replace the value of this entry.
      *
@@ -1011,7 +1011,7 @@ implements HeapSize, Map<K,V> {
       heapSize += sizeDiff;
       return sizeDiff;
     }
-  
+
     /**
     * Returns true if the specified entry has the same key and the
      * same value as this entry.
@@ -1028,13 +1028,13 @@ implements HeapSize, Map<K,V> {
       if (k1 == k2 || (k1 != null && k1.equals(k2))) {
           Object v1 = getValue();
           Object v2 = e.getValue();
-          if (v1 == v2 || (v1 != null && v1.equals(v2))) 
+          if (v1 == v2 || (v1 != null && v1.equals(v2)))
             return true;
       }
       return false;
     }
-    
-    /** 
+
+    /**
      * Returns the hash code of the entry by xor'ing the hash values
      * of the key and value of this entry.
      *
@@ -1043,7 +1043,7 @@ implements HeapSize, Map<K,V> {
     public int hashCode() {
       return (key.hashCode() ^ value.hashCode());
     }
-  
+
     /**
      * Returns String representation of the entry in form "key=value"
      *
@@ -1061,15 +1061,15 @@ implements HeapSize, Map<K,V> {
     protected void setPrevPtr(Entry<K,V> prevPtr){
       this.prevPtr = prevPtr;
     }
-    
+
     /**
      * Returns the previous pointer for the entry in the LRU.
      * @return previous entry
      */
     protected Entry<K,V> getPrevPtr(){
       return prevPtr;
-    }    
-    
+    }
+
     /**
      * Sets the next pointer for the entry in the LRU.
      * @param nextPtr next entry
@@ -1077,7 +1077,7 @@ implements HeapSize, Map<K,V> {
     protected void setNextPtr(Entry<K,V> nextPtr){
       this.nextPtr = nextPtr;
     }
-    
+
     /**
     * Returns the next pointer for the entry in the LRU.
      * @return next entry
@@ -1085,7 +1085,7 @@ implements HeapSize, Map<K,V> {
     protected Entry<K,V> getNextPtr(){
       return nextPtr;
     }
-    
+
     /**
      * Returns the pre-computed and "deep" size of the Entry
      * @return size of the entry in bytes
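
LruHashMap above hand-rolls its LRU: hash chains for lookup plus the doubly linked list (headPtr/tailPtr, prevPtr/nextPtr) ordered by access time, with eviction driven by a byte budget rather than an entry count. For contrast, a count-bounded LRU can lean on LinkedHashMap's access order; the sketch below is that simpler variant, not a substitute for the heapSize accounting this class needs:

  import java.util.LinkedHashMap;
  import java.util.Map;

  // Entry-count-bounded LRU cache built on LinkedHashMap's access order.
  public class SimpleLruCache<K, V> extends LinkedHashMap<K, V> {
    private final int maxEntries;

    public SimpleLruCache(int maxEntries) {
      super(16, 0.75f, true);      // accessOrder=true: get() moves entry to MRU end
      this.maxEntries = maxEntries;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
      return size() > maxEntries;  // evict from the LRU end past the cap
    }
  }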

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java Fri May  7 19:26:45 2010
@@ -204,7 +204,7 @@ public class MemStore implements HeapSiz
     return s;
   }
 
-  /** 
+  /**
    * Write a delete
    * @param delete
    * @return approximate size of the passed key and value.
@@ -221,7 +221,7 @@ public class MemStore implements HeapSiz
     //TODO Would be nice if we had an iterator for this, so we could remove
     //things that need to be removed while iterating and don't have to go
     //back and do it afterwards
-    
+
     try {
       boolean notpresent = false;
       List<KeyValue> deletes = new ArrayList<KeyValue>();
@@ -230,34 +230,34 @@ public class MemStore implements HeapSiz
       //Parse the delete, so that it is only done once
       byte [] deleteBuffer = delete.getBuffer();
       int deleteOffset = delete.getOffset();
-  
+
       int deleteKeyLen = Bytes.toInt(deleteBuffer, deleteOffset);
       deleteOffset += Bytes.SIZEOF_INT + Bytes.SIZEOF_INT;
-  
+
       short deleteRowLen = Bytes.toShort(deleteBuffer, deleteOffset);
       deleteOffset += Bytes.SIZEOF_SHORT;
       int deleteRowOffset = deleteOffset;
-  
+
       deleteOffset += deleteRowLen;
-  
+
       byte deleteFamLen = deleteBuffer[deleteOffset];
       deleteOffset += Bytes.SIZEOF_BYTE + deleteFamLen;
-  
+
       int deleteQualifierOffset = deleteOffset;
       int deleteQualifierLen = deleteKeyLen - deleteRowLen - deleteFamLen -
-        Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG - 
+        Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG -
         Bytes.SIZEOF_BYTE;
-      
+
       deleteOffset += deleteQualifierLen;
-  
+
       int deleteTimestampOffset = deleteOffset;
       deleteOffset += Bytes.SIZEOF_LONG;
       byte deleteType = deleteBuffer[deleteOffset];
-      
+
       //Comparing with tail from memstore
       for (KeyValue kv : tail) {
-        DeleteCode res = DeleteCompare.deleteCompare(kv, deleteBuffer, 
-            deleteRowOffset, deleteRowLen, deleteQualifierOffset, 
+        DeleteCode res = DeleteCompare.deleteCompare(kv, deleteBuffer,
+            deleteRowOffset, deleteRowLen, deleteQualifierOffset,
             deleteQualifierLen, deleteTimestampOffset, deleteType,
             comparator.getRawComparator());
         if (res == DeleteCode.DONE) {
@@ -272,7 +272,7 @@ public class MemStore implements HeapSiz
         notpresent = this.kvset.remove(kv);
         s -= heapSizeChange(kv, notpresent);
       }
-      
+
       // Adding the delete to memstore. Add any value, as long as
       // same instance each time.
       s += heapSizeChange(delete, this.kvset.add(delete));
@@ -282,7 +282,7 @@ public class MemStore implements HeapSiz
     this.size.addAndGet(s);
     return s;
   }
-  
+
   /**
    * @param kv Find the row that comes after this one.  If null, we return the
    * first.
@@ -533,7 +533,7 @@ public class MemStore implements HeapSiz
   void readLockUnlock() {
     this.lock.readLock().unlock();
   }
-  
+
   /**
    *
    * @param set memstore or snapshot
@@ -566,7 +566,7 @@ public class MemStore implements HeapSiz
     }
     return false;
   }
-  
+
 
   /*
    * MemStoreScanner implements the KeyValueScanner.
@@ -730,7 +730,7 @@ public class MemStore implements HeapSiz
 
   public final static long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT + (8 * ClassSize.REFERENCE));
-  
+
   public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
       ClassSize.REENTRANT_LOCK + ClassSize.ATOMIC_LONG +
       ClassSize.COPYONWRITE_ARRAYSET + ClassSize.COPYONWRITE_ARRAYLIST +
@@ -744,11 +744,11 @@ public class MemStore implements HeapSiz
    * @return Size
    */
   long heapSizeChange(final KeyValue kv, final boolean notpresent) {
-    return notpresent ? 
+    return notpresent ?
         ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()):
         0;
   }
-  
+
   /**
    * Get the entire heap usage for this MemStore not including keys in the
    * snapshot.
@@ -757,7 +757,7 @@ public class MemStore implements HeapSiz
   public long heapSize() {
     return size.get();
   }
-  
+
   /**
    * Get the heap usage of KVs in this MemStore.
    */
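
The delete() path above walks a serialized KeyValue by offsets: a 4-byte key length and 4-byte value length, then within the key a 2-byte row length, the row, a 1-byte family length, the family, the qualifier, an 8-byte timestamp, and a 1-byte type. The qualifier length is never stored; it is recovered by subtraction, as in the code. A worked instance of that arithmetic with made-up field sizes:

  // Rehearses the qualifier-length subtraction from delete() above.
  public class KeyLayoutSketch {
    static final int SIZEOF_SHORT = 2, SIZEOF_BYTE = 1, SIZEOF_LONG = 8;

    public static void main(String[] args) {
      short rowLen = 3;   // hypothetical: 3-byte row key
      byte famLen = 2;    // hypothetical: 2-byte family name
      int qualLen = 4;    // hypothetical: 4-byte qualifier
      // Key length covers: rowLen field + row + famLen field + family
      // + qualifier + timestamp + type.
      int keyLen = SIZEOF_SHORT + rowLen + SIZEOF_BYTE + famLen + qualLen
          + SIZEOF_LONG + SIZEOF_BYTE;                  // = 21
      // The same subtraction the patch performs:
      int recovered = keyLen - rowLen - famLen
          - SIZEOF_SHORT - SIZEOF_BYTE - SIZEOF_LONG - SIZEOF_BYTE;
      System.out.println(recovered);                    // prints 4
    }
  }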

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java Fri May  7 19:26:45 2010
@@ -45,14 +45,14 @@ import java.util.concurrent.locks.Reentr
  * NOTE: This class extends Thread rather than Chore because the sleep time
  * can be interrupted when there is something to do, rather than the Chore
  * sleep time which is invariant.
- * 
+ *
  * @see FlushRequester
  */
 class MemStoreFlusher extends Thread implements FlushRequester {
   static final Log LOG = LogFactory.getLog(MemStoreFlusher.class);
   private final BlockingQueue<HRegion> flushQueue =
     new LinkedBlockingQueue<HRegion>();
-  
+
   private final HashSet<HRegion> regionsInQueue = new HashSet<HRegion>();
 
   private final long threadWakeFrequency;
@@ -61,7 +61,7 @@ class MemStoreFlusher extends Thread imp
 
   protected final long globalMemStoreLimit;
   protected final long globalMemStoreLimitLowMark;
-  
+
   private static final float DEFAULT_UPPER = 0.4f;
   private static final float DEFAULT_LOWER = 0.25f;
   private static final String UPPER_KEY =
@@ -91,7 +91,7 @@ class MemStoreFlusher extends Thread imp
         "because supplied " + LOWER_KEY + " was > " + UPPER_KEY);
     }
     this.globalMemStoreLimitLowMark = lower;
-    this.blockingStoreFilesNumber = 
+    this.blockingStoreFilesNumber =
       conf.getInt("hbase.hstore.blockingStoreFiles", -1);
     if (this.blockingStoreFilesNumber == -1) {
       this.blockingStoreFilesNumber = 1 +
@@ -120,7 +120,7 @@ class MemStoreFlusher extends Thread imp
     float limit = c.getFloat(key, defaultLimit);
     return getMemStoreLimit(max, limit, defaultLimit);
   }
-  
+
   static long getMemStoreLimit(final long max, final float limit,
       final float defaultLimit) {
     if (limit >= 0.9f || limit < 0.1f) {
@@ -129,7 +129,7 @@ class MemStoreFlusher extends Thread imp
     }
     return (long)(max * limit);
   }
-  
+
   @Override
   public void run() {
     while (!this.server.isStopRequested()) {
@@ -159,7 +159,7 @@ class MemStoreFlusher extends Thread imp
     this.flushQueue.clear();
     LOG.info(getName() + " exiting");
   }
-  
+
   public void request(HRegion r) {
     synchronized (regionsInQueue) {
       if (!regionsInQueue.contains(r)) {
@@ -168,10 +168,10 @@ class MemStoreFlusher extends Thread imp
       }
     }
   }
-  
+
   /**
    * Only interrupt once it's done with a run through the work loop.
-   */ 
+   */
   void interruptIfNecessary() {
     lock.lock();
     try {
@@ -180,10 +180,10 @@ class MemStoreFlusher extends Thread imp
       lock.unlock();
     }
   }
-  
+
   /*
    * Flush a region.
-   * 
+   *
    * @param region the region to be flushed
    * @param removeFromQueue True if the region needs to be removed from the
    * flush queue. False if called from the main flusher run loop and true if
@@ -196,21 +196,21 @@ class MemStoreFlusher extends Thread imp
    * That compactions do not run when called out of flushSomeRegions means that
    * compactions can be reported by the historian without danger of deadlock
    * (HBASE-670).
-   * 
+   *
    * <p>In the main run loop, regions have already been removed from the flush
    * queue, and if this method is called for the relief of memory pressure,
-   * this may not be necessarily true. We want to avoid trying to remove 
+   * this may not necessarily be true. We want to avoid trying to remove
    * the region from the queue because if it has already been removed, it requires a
    * sequential scan of the queue to determine that it is not in the queue.
-   * 
+   *
    * <p>If called from flushSomeRegions, the region may be in the queue but
-   * it may have been determined that the region had a significant amount of 
+   * it may have been determined that the region had a significant amount of
    * memory in use and needed to be flushed to relieve memory pressure. In this
    * case, its flush may preempt the pending request in the queue, and if so,
    * it needs to be removed from the queue to avoid flushing the region
    * multiple times.
-   * 
-   * @return true if the region was successfully flushed, false otherwise. If 
+   *
+   * @return true if the region was successfully flushed, false otherwise. If
    * false, there will be accompanying log messages explaining why the log was
    * not flushed.
    */
@@ -334,7 +334,7 @@ class MemStoreFlusher extends Thread imp
   }
 
   /**
-   * Check if the regionserver's memstore memory usage is greater than the 
+   * Check if the regionserver's memstore memory usage is greater than the
    * limit. If so, flush regions with the biggest memstores until we're down
    * to the lower limit. This method blocks callers until we're down to a safe
    * amount of memstore consumption.

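A note for readers tracing the flush thresholds: the hunks above show only the signature and tail of getMemStoreLimit(), so the body of the out-of-range branch is not visible here. A minimal sketch of the visible logic, assuming (not shown in the diff) that an unreasonable fraction falls back to the default:

    public class MemStoreLimitSketch {
      static final float DEFAULT_UPPER = 0.4f;   // from the diff above
      static final float DEFAULT_LOWER = 0.25f;  // from the diff above

      static long getMemStoreLimit(long max, float limit, float defaultLimit) {
        float effective = limit;
        if (limit >= 0.9f || limit < 0.1f) {
          effective = defaultLimit;  // assumed fallback; branch body is elided above
        }
        return (long) (max * effective);
      }

      public static void main(String[] args) {
        long maxHeap = 1024L * 1024 * 1024;  // pretend 1 GB of heap
        System.out.println(getMemStoreLimit(maxHeap, DEFAULT_UPPER, DEFAULT_UPPER)); // ~40%
        System.out.println(getMemStoreLimit(maxHeap, DEFAULT_LOWER, DEFAULT_LOWER)); // ~25%
        System.out.println(getMemStoreLimit(maxHeap, 0.95f, DEFAULT_UPPER));         // clamped
      }
    }

The upper fraction is where flushSomeRegions starts blocking callers; flushing the biggest memstores continues until usage drops back to the globalMemStoreLimitLowMark, per the javadoc above.
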
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java Fri May  7 19:26:45 2010
@@ -33,7 +33,7 @@ import java.util.NavigableSet;
  * This is the primary class used to process KeyValues during a Get or Scan
  * operation.
  * <p>
- * It encapsulates the handling of the column and version input parameters to 
+ * It encapsulates the handling of the column and version input parameters to
  * the query through a {@link ColumnTracker}.
  * <p>
  * Deletes are handled using the {@link DeleteTracker}.
@@ -41,10 +41,10 @@ import java.util.NavigableSet;
  * All other query parameters are accessed from the client-specified Get.
  * <p>
  * The primary method used is {@link #match} with the current KeyValue.  It will
- * return a {@link QueryMatcher.MatchCode} 
- * 
+ * return a {@link QueryMatcher.MatchCode}
+ *
  * , deletes,
- * versions, 
+ * versions,
  */
 public class QueryMatcher {
   /**
@@ -59,17 +59,17 @@ public class QueryMatcher {
      * Include KeyValue in the returned result
      */
     INCLUDE,
-    
+
     /**
      * Do not include KeyValue in the returned result
      */
     SKIP,
-    
+
     /**
      * Do not include, jump to next StoreFile or memstore (in time order)
      */
     NEXT,
-    
+
     /**
      * Do not include, return current result
      */
@@ -93,25 +93,25 @@ public class QueryMatcher {
      */
     DONE_SCAN,
   }
-  
+
   /** Keeps track of deletes */
   protected DeleteTracker deletes;
-  
+
   /** Keeps track of columns and versions */
   protected ColumnTracker columns;
-  
+
   /** Key to seek to in memstore and StoreFiles */
   protected KeyValue startKey;
-  
+
   /** Row comparator for the region this query is for */
   KeyComparator rowComparator;
-  
+
   /** Row the query is on */
   protected byte [] row;
-  
+
   /** TimeRange the query is for */
   protected TimeRange tr;
-  
+
   /** Oldest allowed version stamp for TTL enforcement */
   protected long oldestStamp;
 
@@ -125,7 +125,7 @@ public class QueryMatcher {
    * @param ttl
    * @param rowComparator
    */
-  public QueryMatcher(Get get, byte [] family, 
+  public QueryMatcher(Get get, byte [] family,
       NavigableSet<byte[]> columns, long ttl, KeyComparator rowComparator,
       int maxVersions) {
     this.row = get.getRow();
@@ -164,7 +164,7 @@ public class QueryMatcher {
     this.startKey = matcher.getStartKey();
     reset();
   }
-  
+
   /**
    * Main method for ColumnMatcher.
    * <p>
@@ -195,10 +195,10 @@ public class QueryMatcher {
     // Directly act on KV buffer
     byte [] bytes = kv.getBuffer();
     int offset = kv.getOffset();
-    
+
     int keyLength = Bytes.toInt(bytes, offset);
     offset += KeyValue.ROW_OFFSET;
-    
+
     short rowLength = Bytes.toShort(bytes, offset);
     offset += Bytes.SIZEOF_SHORT;
 
@@ -207,7 +207,7 @@ public class QueryMatcher {
     /* Check ROW
      * If past query's row, go to next StoreFile
      * If not reached query's row, go to next KeyValue
-     */ 
+     */
     int ret = this.rowComparator.compareRows(row, 0, row.length,
         bytes, offset, rowLength);
     if (ret <= -1) {
@@ -220,7 +220,7 @@ public class QueryMatcher {
     offset += rowLength;
     byte familyLength = bytes[offset];
     offset += Bytes.SIZEOF_BYTE + familyLength;
-    
+
     int columnLength = keyLength + KeyValue.ROW_OFFSET -
       (offset - kv.getOffset()) - KeyValue.TIMESTAMP_TYPE_SIZE;
     int columnOffset = offset;
@@ -244,14 +244,14 @@ public class QueryMatcher {
      */
     byte type = bytes[offset];
     // if delete type == delete family, return done_row
-    
+
     if (isDelete(type)) {
       if (tr.withinOrAfterTimeRange(timestamp)) {
         this.deletes.add(bytes, columnOffset, columnLength, timestamp, type);
       }
       return MatchCode.SKIP;  // skip the delete cell.
     }
-    
+
     /* Check TimeRange
      * If outside of range, move to next KeyValue
      */
@@ -274,8 +274,8 @@ public class QueryMatcher {
      * Returns a MatchCode directly, identical language
      * If matched column without enough versions, include
      * If enough versions of this column or does not match, skip
-     * If have moved past 
-     * If enough versions of everything, 
+     * If have moved past
+     * If enough versions of everything,
      * TODO: No mapping from Filter.ReturnCode to MatchCode.
      */
     MatchCode mc = columns.checkColumn(bytes, columnOffset, columnLength);
@@ -293,7 +293,7 @@ public class QueryMatcher {
   protected boolean isDelete(byte type) {
     return (type != KeyValue.Type.Put.getCode());
   }
-  
+
   protected boolean isExpired(long timestamp) {
     return (timestamp < oldestStamp);
   }
@@ -309,18 +309,18 @@ public class QueryMatcher {
   public ColumnCount getSeekColumn() {
     return this.columns.getColumnHint();
   }
-  
+
   /**
    * Called after reading each section (memstore, snapshot, storefiles).
    * <p>
    * This method will update the internal structures to be accurate for
-   * the next section. 
+   * the next section.
    */
   public void update() {
     this.deletes.update();
     this.columns.update();
   }
-  
+
   /**
    * Resets the current columns and deletes
    */
@@ -336,52 +336,52 @@ public class QueryMatcher {
   public void setRow(byte [] row) {
     this.row = row;
   }
-  
+
   /**
-   * 
+   *
    * @return the start key
    */
   public KeyValue getStartKey() {
     return this.startKey;
   }
-  
+
   /**
    * @return the TimeRange
    */
   public TimeRange getTimeRange() {
     return this.tr;
   }
-  
+
   /**
    * @return the oldest stamp
    */
   public long getOldestStamp() {
     return this.oldestStamp;
   }
-  
+
   /**
    * @return current KeyComparator
    */
   public KeyComparator getRowComparator() {
     return this.rowComparator;
   }
-  
+
   /**
    * @return ColumnTracker
    */
   public ColumnTracker getColumnTracker() {
     return this.columns;
   }
-  
+
   /**
    * @return DeleteTracker
    */
   public DeleteTracker getDeleteTracker() {
     return this.deletes;
   }
-  
+
   /**
-   * 
+   *
    * @return <code>true</code> when done.
    */
   public boolean isDone() {

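The match() hunks above act directly on the serialized KeyValue buffer rather than on accessor methods. A self-contained sketch of the same offset walk, with stand-in constants for KeyValue.ROW_OFFSET and KeyValue.TIMESTAMP_TYPE_SIZE (8 and 9 here are assumptions about the layout: two length ints up front, a long timestamp plus one type byte at the end):

    import java.nio.ByteBuffer;

    public class KeyValueWalkSketch {
      static final int ROW_OFFSET = 8;          // assumed: key-length int + value-length int
      static final int TIMESTAMP_TYPE_SIZE = 9; // assumed: 8-byte timestamp + 1 type byte

      public static void main(String[] args) {
        // Build one serialized key: lengths, row, family, qualifier, timestamp, type.
        byte[] row = "row1".getBytes();
        byte[] family = "cf".getBytes();
        byte[] qualifier = "q1".getBytes();
        int keyLength = 2 + row.length + 1 + family.length + qualifier.length
            + TIMESTAMP_TYPE_SIZE;
        ByteBuffer b = ByteBuffer.allocate(ROW_OFFSET + keyLength);
        b.putInt(keyLength).putInt(0);                        // key length, empty value
        b.putShort((short) row.length).put(row);              // row
        b.put((byte) family.length).put(family);              // family
        b.put(qualifier);                                     // qualifier
        b.putLong(System.currentTimeMillis()).put((byte) 4);  // timestamp, Put code (assumed)
        byte[] bytes = b.array();

        // The same walk match() performs, bookkeeping offsets as in the diff.
        int offset = 0;
        int initialOffset = offset;
        int kl = ByteBuffer.wrap(bytes, offset, 4).getInt();
        offset += ROW_OFFSET;
        short rowLength = ByteBuffer.wrap(bytes, offset, 2).getShort();
        offset += 2;                       // Bytes.SIZEOF_SHORT
        offset += rowLength;               // past the row bytes
        byte familyLength = bytes[offset];
        offset += 1 + familyLength;        // Bytes.SIZEOF_BYTE plus the family
        int qualLength = kl + ROW_OFFSET - (offset - initialOffset) - TIMESTAMP_TYPE_SIZE;
        System.out.println("qualifier length = " + qualLength);  // prints 2
      }
    }
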
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java Fri May  7 19:26:45 2010
@@ -27,7 +27,7 @@ import java.io.IOException;
  */
 public class RegionServerRunningException extends IOException {
   private static final long serialVersionUID = 1L << 31 - 1L;
-  
+
   /** Default Constructor */
   public RegionServerRunningException() {
     super();

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java Fri May  7 19:26:45 2010
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.Byte
  * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
  * <li>{@link #update} when reaching the end of a StoreFile or row for scans
  * <p>
- * This class is NOT thread-safe as queries are never multi-threaded 
+ * This class is NOT thread-safe as queries are never multi-threaded
  */
 public class ScanDeleteTracker implements DeleteTracker {
 
@@ -53,7 +53,7 @@ public class ScanDeleteTracker implement
   public ScanDeleteTracker() {
     super();
   }
-  
+
   /**
    * Add the specified KeyValue to the list of deletes to check against for
    * this row operation.
@@ -91,7 +91,7 @@ public class ScanDeleteTracker implement
     // missing else is never called.
   }
 
-  /** 
+  /**
    * Check if the specified KeyValue buffer has been deleted by a previously
    * seen delete.
    *
@@ -107,7 +107,7 @@ public class ScanDeleteTracker implement
     if (timestamp < familyStamp) {
       return true;
     }
-    
+
     if (deleteBuffer != null) {
       int ret = Bytes.compareTo(deleteBuffer, deleteOffset, deleteLength,
           buffer, qualifierOffset, qualifierLength);
@@ -150,7 +150,7 @@ public class ScanDeleteTracker implement
   }
 
   @Override
-  // should not be called at all even (!)  
+  // should not be called at all even (!)
   public void update() {
     this.reset();
   }

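The isDeleted() hunk above shows the family-stamp short-circuit and the start of the qualifier comparison; what follows the Bytes.compareTo call is outside the hunk. A sketch of the two-tier check, with the column-delete timestamp semantics after that comparison being an assumption:

    import java.util.Arrays;

    public class ScanDeleteSketch {
      long familyStamp = 0;    // newest DeleteFamily timestamp seen this row
      byte[] deleteQualifier;  // qualifier of the last column delete seen
      long deleteStamp;        // its timestamp

      boolean isDeleted(byte[] qualifier, long timestamp) {
        if (timestamp < familyStamp) {
          return true;         // older than a family-wide delete: gone
        }
        if (deleteQualifier != null
            && Arrays.equals(deleteQualifier, qualifier)
            && timestamp <= deleteStamp) {  // assumed semantics for the elided branch
          return true;
        }
        return false;
      }

      public static void main(String[] args) {
        ScanDeleteSketch t = new ScanDeleteSketch();
        t.familyStamp = 100L;
        System.out.println(t.isDeleted("q1".getBytes(), 99L));  // true: family delete wins
        System.out.println(t.isDeleted("q1".getBytes(), 150L)); // false: newer than it
      }
    }
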
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java Fri May  7 19:26:45 2010
@@ -44,7 +44,7 @@ public class ScanQueryMatcher extends Qu
    * @param rowComparator
    */
   public ScanQueryMatcher(Scan scan, byte [] family,
-      NavigableSet<byte[]> columns, long ttl, 
+      NavigableSet<byte[]> columns, long ttl,
       KeyValue.KeyComparator rowComparator, int maxVersions) {
     this.tr = scan.getTimeRange();
     this.oldestStamp = System.currentTimeMillis() - ttl;
@@ -52,7 +52,7 @@ public class ScanQueryMatcher extends Qu
     this.deletes =  new ScanDeleteTracker();
     this.startKey = KeyValue.createFirstOnRow(scan.getStartRow());
     this.filter = scan.getFilter();
-    
+
     // Single branch to deal with two types of reads (columns vs all in family)
     if (columns == null || columns.size() == 0) {
       // use a specialized scan for wildcard column tracker.
@@ -71,7 +71,7 @@ public class ScanQueryMatcher extends Qu
    * - include the current KeyValue (MatchCode.INCLUDE)
    * - ignore the current KeyValue (MatchCode.SKIP)
    * - got to the next row (MatchCode.DONE)
-   * 
+   *
    * @param kv KeyValue to check
    * @return The match code instance.
    */
@@ -82,14 +82,14 @@ public class ScanQueryMatcher extends Qu
 
     byte [] bytes = kv.getBuffer();
     int offset = kv.getOffset();
-    int initialOffset = offset; 
+    int initialOffset = offset;
 
     int keyLength = Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT);
     offset += KeyValue.ROW_OFFSET;
-    
+
     short rowLength = Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT);
     offset += Bytes.SIZEOF_SHORT;
-    
+
     int ret = this.rowComparator.compareRows(row, 0, row.length,
         bytes, offset, rowLength);
     if (ret <= -1) {
@@ -109,17 +109,17 @@ public class ScanQueryMatcher extends Qu
       stickyNextRow = true;
       return MatchCode.SEEK_NEXT_ROW;
     }
-    
+
     //Passing rowLength
     offset += rowLength;
 
     //Skipping family
     byte familyLength = bytes [offset];
     offset += familyLength + 1;
-    
+
     int qualLength = keyLength + KeyValue.ROW_OFFSET -
       (offset - initialOffset) - KeyValue.TIMESTAMP_TYPE_SIZE;
-    
+
     long timestamp = kv.getTimestamp();
     if (isExpired(timestamp)) {
       // done, the rest of this column will also be expired as well.
@@ -132,7 +132,7 @@ public class ScanQueryMatcher extends Qu
         this.deletes.add(bytes, offset, qualLength, timestamp, type);
         // Can't early out now, because DelFam come before any other keys
       }
-      // May be able to optimize the SKIP here, if we matched 
+      // May be able to optimize the SKIP here, if we matched
       // due to a DelFam, we can skip to next row
       // due to a DelCol, we can skip to next col
       // But it requires more info out of isDelete().

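For orientation, the javadoc above enumerates what a caller does with the returned codes. A stand-in sketch (these are not the HBase classes) of the loop a scanner runs around match():

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class ScanLoopSketch {
      enum MatchCode { INCLUDE, SKIP, SEEK_NEXT_ROW, DONE }

      interface Matcher { MatchCode match(String kv); }

      static List<String> scanRow(Iterator<String> kvs, Matcher matcher) {
        List<String> results = new ArrayList<String>();
        while (kvs.hasNext()) {
          String kv = kvs.next();
          switch (matcher.match(kv)) {
            case INCLUDE:       results.add(kv); break; // keep this cell
            case SKIP:          break;                  // ignore, keep scanning
            case SEEK_NEXT_ROW:                         // real code reseeks past the row
            case DONE:          return results;         // row (or scan) is finished
          }
        }
        return results;
      }

      public static void main(String[] args) {
        Matcher dropDeletes = new Matcher() {
          public MatchCode match(String kv) {
            return kv.endsWith("-del") ? MatchCode.SKIP : MatchCode.INCLUDE;
          }
        };
        List<String> kvs = Arrays.asList("a", "b-del", "c");
        System.out.println(scanRow(kvs.iterator(), dropDeletes)); // [a, c]
      }
    }
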
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java Fri May  7 19:26:45 2010
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.util.Byte
  * Keeps track of the columns for a scan if they are not explicitly specified
  */
 public class ScanWildcardColumnTracker implements ColumnTracker {
-  private static final Log LOG = 
+  private static final Log LOG =
     LogFactory.getLog(ScanWildcardColumnTracker.class);
   private byte [] columnBuffer = null;
   private int columnOffset = 0;
@@ -103,7 +103,7 @@ public class ScanWildcardColumnTracker i
     currentCount = 0;
     if (++currentCount > maxVersions)
       return MatchCode.SKIP;
-    return MatchCode.INCLUDE;    
+    return MatchCode.INCLUDE;
   }
 
   @Override
@@ -122,7 +122,7 @@ public class ScanWildcardColumnTracker i
    * Used by matcher and scan/get to get a hint of the next column
    * to seek to after checkColumn() returns SKIP.  Returns the next interesting
    * column we want, or NULL there is none (wildcard scanner).
-   * 
+   *
    * @return The column count.
    */
   public ColumnCount getColumnHint() {
@@ -131,7 +131,7 @@ public class ScanWildcardColumnTracker i
 
 
   /**
-   * We can never know a-priori if we are done, so always return false. 
+   * We can never know a-priori if we are done, so always return false.
    * @return false
    */
   @Override

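One detail worth noting in the checkColumn() hunk above: the new-column branch resets currentCount to 0 and then pre-increments it, so the SKIP there can only fire when maxVersions < 1. A sketch of the counting scheme as a whole, with the same-column branch reconstructed (it is not part of the hunk):

    import java.util.Arrays;

    public class WildcardTrackerSketch {
      private byte[] columnBuffer = null;  // last column seen
      private int currentCount = 0;        // versions of it returned so far
      private final int maxVersions;

      WildcardTrackerSketch(int maxVersions) { this.maxVersions = maxVersions; }

      String checkColumn(byte[] qualifier) {
        if (columnBuffer != null && Arrays.equals(columnBuffer, qualifier)) {
          // Same column as last time: one more version of it.
          return (++currentCount > maxVersions) ? "SKIP" : "INCLUDE";
        }
        // New column: start counting from scratch, as in the diff above.
        columnBuffer = qualifier;
        currentCount = 0;
        return (++currentCount > maxVersions) ? "SKIP" : "INCLUDE";
      }

      public static void main(String[] args) {
        WildcardTrackerSketch t = new WildcardTrackerSketch(2);
        byte[] q = "q1".getBytes();
        System.out.println(t.checkColumn(q)); // INCLUDE (version 1)
        System.out.println(t.checkColumn(q)); // INCLUDE (version 2)
        System.out.println(t.checkColumn(q)); // SKIP (past maxVersions)
      }
    }
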
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Fri May  7 19:26:45 2010
@@ -67,7 +67,7 @@ import java.util.concurrent.locks.Reentr
   * A Store holds a column family in a Region.  Its a memstore and a set of zero
   * or more StoreFiles, which stretch backwards over time.
   *
-  * <p>There's no reason to consider append-logging at this level; all logging 
+  * <p>There's no reason to consider append-logging at this level; all logging
   * and locking is handled at the HRegion level.  Store just provides
   * services to manage sets of StoreFiles.  One of the most important of those
   * services is compaction services where files are aggregated once they pass
@@ -140,7 +140,7 @@ public class Store implements HConstants
   private final int blocksize;
   private final boolean blockcache;
   private final Compression.Algorithm compression;
-  
+
   // Comparing KeyValues
   final KeyValue.KVComparator comparator;
   final KeyValue.KVComparator comparatorIgnoringType;
@@ -191,7 +191,7 @@ public class Store implements HConstants
       this.ttl *= 1000;
     }
     this.memstore = new MemStore(this.comparator);
-    this.regionCompactionDir = new Path(HRegion.getCompactionDir(basedir), 
+    this.regionCompactionDir = new Path(HRegion.getCompactionDir(basedir),
         Integer.toString(info.getEncodedName()));
     this.storeName = this.family.getName();
     this.storeNameStr = Bytes.toString(this.storeName);
@@ -200,10 +200,10 @@ public class Store implements HConstants
     // MIN_COMMITS_FOR_COMPACTION map files
     this.compactionThreshold =
       conf.getInt("hbase.hstore.compactionThreshold", 3);
-    
+
     // Check if this is in-memory store
     this.inMemory = family.isInMemory();
-    
+
     // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
     long maxFileSize = info.getTableDesc().getMaxFileSize();
     if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
@@ -233,7 +233,7 @@ public class Store implements HConstants
       this.maxSeqId = newId; // start with the log id we just recovered.
     }
   }
-    
+
   HColumnDescriptor getFamily() {
     return this.family;
   }
@@ -241,7 +241,7 @@ public class Store implements HConstants
   long getMaxSequenceId() {
     return this.maxSeqId;
   }
-  
+
   long getMaxSeqIdBeforeLogRecovery() {
     return maxSeqIdBeforeLogRecovery;
   }
@@ -291,10 +291,10 @@ public class Store implements HConstants
   }
 
   /*
-   * Read the reconstructionLog and put into memstore. 
+   * Read the reconstructionLog and put into memstore.
    *
-   * We can ignore any log message that has a sequence ID that's equal to or 
-   * lower than maxSeqID.  (Because we know such log messages are already 
+   * We can ignore any log message that has a sequence ID that's equal to or
+   * lower than maxSeqID.  (Because we know such log messages are already
    * reflected in the HFiles.)
    *
    * @return the new max sequence id as per the log, or -1 if no log recovered
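
The replay rule stated above (drop anything at or below maxSeqID, because it is already reflected in the HFiles) reduces to a filter over the log's edits. A sketch with stand-in types for the HLog entries:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class LogReplaySketch {
      static long replay(Map<Long, String> logEdits, long maxSeqId,
                         Map<Long, String> memstore) {
        long maxSeqIdInLog = -1;
        for (Map.Entry<Long, String> e : logEdits.entrySet()) {
          long seqId = e.getKey();
          if (seqId <= maxSeqId) {
            continue;                        // already reflected in the HFiles
          }
          memstore.put(seqId, e.getValue()); // re-apply only the newer edit
          maxSeqIdInLog = Math.max(maxSeqIdInLog, seqId);
        }
        return maxSeqIdInLog;                // -1 if nothing was recovered
      }

      public static void main(String[] args) {
        Map<Long, String> log = new LinkedHashMap<Long, String>();
        log.put(5L, "old edit");
        log.put(9L, "new edit");
        Map<Long, String> memstore = new LinkedHashMap<Long, String>();
        System.out.println(replay(log, 7L, memstore)); // 9; only seqId 9 applied
      }
    }
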
@@ -377,7 +377,7 @@ public class Store implements HConstants
     } finally {
       logReader.close();
     }
-    
+
     if (maxSeqIdInLog > -1) {
       // We read some edits, so we should flush the memstore
       this.snapshot();
@@ -437,7 +437,7 @@ public class Store implements HConstants
 
   /**
    * Adds a value to the memstore
-   * 
+   *
    * @param kv
    * @return memstore size delta
    */
@@ -452,7 +452,7 @@ public class Store implements HConstants
 
   /**
    * Adds a value to the memstore
-   * 
+   *
    * @param kv
    * @return memstore size delta
    */
@@ -474,10 +474,10 @@ public class Store implements HConstants
 
   /**
    * Close all the readers
-   * 
+   *
    * We don't need to worry about subsequent requests because the HRegion holds
    * a write lock that will prevent any more reads or writes.
-   * 
+   *
    * @throws IOException
    */
   List<StoreFile> close() throws IOException {
@@ -567,7 +567,7 @@ public class Store implements HConstants
         writer.close();
       }
     }
-    StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, 
+    StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache,
       this.conf, this.inMemory);
     Reader r = sf.getReader();
     this.storeSize += r.length();
@@ -657,21 +657,21 @@ public class Store implements HConstants
   //////////////////////////////////////////////////////////////////////////////
 
   /**
-   * Compact the StoreFiles.  This method may take some time, so the calling 
+   * Compact the StoreFiles.  This method may take some time, so the calling
    * thread must be able to block for long periods.
-   * 
+   *
    * <p>During this time, the Store can work as usual, getting values from
    * StoreFiles and writing new StoreFiles from the memstore.
-   * 
-   * Existing StoreFiles are not destroyed until the new compacted StoreFile is 
+   *
+   * Existing StoreFiles are not destroyed until the new compacted StoreFile is
    * completely written-out to disk.
    *
    * <p>The compactLock prevents multiple simultaneous compactions.
    * The structureLock prevents us from interfering with other write operations.
-   * 
-   * <p>We don't want to hold the structureLock for the whole time, as a compact() 
+   *
+   * <p>We don't want to hold the structureLock for the whole time, as a compact()
    * can be lengthy and we want to allow cache-flushes during this period.
-   * 
+   *
    * @param mc True to force a major compaction regardless of thresholds
    * @return row to split around if a split is needed, null otherwise
    * @throws IOException
@@ -699,7 +699,7 @@ public class Store implements HConstants
       }
 
       boolean references = hasReferences(filesToCompact);
-      if (!majorcompaction && !references && 
+      if (!majorcompaction && !references &&
           (forceSplit || (filesToCompact.size() < compactionThreshold))) {
         return checkSplit(forceSplit);
       }
@@ -733,14 +733,14 @@ public class Store implements HConstants
         fileSizes[i] = len;
         totalSize += len;
       }
- 
+
       if (!majorcompaction && !references) {
-        // Here we select files for incremental compaction.  
-        // The rule is: if the largest(oldest) one is more than twice the 
+        // Here we select files for incremental compaction.
+        // The rule is: if the largest(oldest) one is more than twice the
         // size of the second, skip the largest, and continue to next...,
         // until we meet the compactionThreshold limit.
         for (point = 0; point < countOfFiles - 1; point++) {
-          if ((fileSizes[point] < fileSizes[point + 1] * 2) && 
+          if ((fileSizes[point] < fileSizes[point + 1] * 2) &&
                (countOfFiles - point) <= maxFilesToCompact) {
             break;
           }
@@ -763,7 +763,7 @@ public class Store implements HConstants
             " file(s), size: " + skipped);
         }
       }
- 
+
       // Ready to go.  Have list of files to compact.
       LOG.debug("Started compaction of " + filesToCompact.size() + " file(s)" +
         (references? ", hasReferences=true,": " ") + " into " +
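
The selection rule in the comments above is compact enough to trace by hand. A worked example with illustrative file sizes, oldest file first and maxFilesToCompact assumed generous:

    public class CompactSelectSketch {
      public static void main(String[] args) {
        long[] fileSizes = { 900, 400, 250, 120, 100 }; // oldest (largest) first
        int maxFilesToCompact = 10;                     // assumed config value
        int countOfFiles = fileSizes.length;
        int point;
        for (point = 0; point < countOfFiles - 1; point++) {
          if ((fileSizes[point] < fileSizes[point + 1] * 2)
              && (countOfFiles - point) <= maxFilesToCompact) {
            break;  // the tail starting here is worth compacting together
          }
        }
        // 900 >= 2 * 400, so the oldest file is skipped;
        // 400 < 2 * 250, so selection starts at index 1.
        System.out.println("compact files " + point + ".." + (countOfFiles - 1));
      }
    }
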
@@ -798,7 +798,7 @@ public class Store implements HConstants
 
   /*
    * Gets lowest timestamp from files in a dir
-   * 
+   *
    * @param fs
    * @param dir
    * @throws IOException
@@ -866,7 +866,7 @@ public class Store implements HConstants
 
   /**
    * Do a minor/major compaction.  Uses the scan infrastructure to make it easy.
-   * 
+   *
    * @param filesToCompact which files to compact
    * @param majorCompaction true to major compact (prune all deletes, max versions, etc)
    * @param maxId Readers maximum sequence id.
@@ -942,14 +942,14 @@ public class Store implements HConstants
   }
 
   /*
-   * It's assumed that the compactLock  will be acquired prior to calling this 
+   * It's assumed that the compactLock  will be acquired prior to calling this
    * method!  Otherwise, it is not thread-safe!
    *
    * <p>It works by processing a compaction that's been written to disk.
-   * 
+   *
    * <p>It is usually invoked at the end of a compaction, but might also be
    * invoked at HStore startup, if the prior execution died midway through.
-   * 
+   *
    * <p>Moving the compacted TreeMap into place means:
    * <pre>
    * 1) Moving the new compacted StoreFile into place
@@ -957,7 +957,7 @@ public class Store implements HConstants
    * 3) Loading the new TreeMap.
    * 4) Compute new store size
    * </pre>
-   * 
+   *
    * @param compactedFiles list of files that were compacted
    * @param compactedFile StoreFile that is the result of the compaction
    * @return StoreFile created. May be null.
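
The <pre> steps above straddle two hunks, so step 2 is not visible here; reading it as "drop the replaced StoreFiles" is an assumption. A sketch of the swap under a write lock, in the spirit of the structureLock the compact() javadoc mentions (readers see either the old file set or the new one, never a mix):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class CompactionSwapSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private List<String> storefiles = new ArrayList<String>();
      private long storeSize = 0;

      void completeCompaction(List<String> compactedFiles, String compactedFile,
                              long compactedSize) {
        lock.writeLock().lock();
        try {
          List<String> next = new ArrayList<String>(storefiles);
          next.removeAll(compactedFiles);  // step 2 (assumed): unload the replaced files
          next.add(compactedFile);         // step 1: the new compacted file, now in place
          storefiles = next;               // step 3: publish the new file set
          storeSize = compactedSize;       // step 4: recompute the store size
        } finally {
          lock.writeLock().unlock();
        }
      }

      public static void main(String[] args) {
        CompactionSwapSketch s = new CompactionSwapSketch();
        s.storefiles.add("f1");
        s.storefiles.add("f2");
        s.completeCompaction(Arrays.asList("f1", "f2"), "f1+f2", 42L);
        System.out.println(s.storefiles + " size=" + s.storeSize); // [f1+f2] size=42
      }
    }
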
@@ -1038,7 +1038,7 @@ public class Store implements HConstants
   public int getNumberOfstorefiles() {
     return this.storefiles.size();
   }
-  
+
   /*
    * @param wantedVersions How many versions were asked for.
    * @return wantedVersions or this families' VERSIONS.
@@ -1065,8 +1065,8 @@ public class Store implements HConstants
 
   /**
    * Find the key that matches <i>row</i> exactly, or the one that immediately
-   * preceeds it. WARNING: Only use this method on a table where writes occur 
-   * with strictly increasing timestamps. This method assumes this pattern of 
+   * preceeds it. WARNING: Only use this method on a table where writes occur
+   * with strictly increasing timestamps. This method assumes this pattern of
    * writes in order to make it reasonably performant.  Also our search is
    * dependent on the axiom that deletes are for cells that are in the container
    * that follows whether a memstore snapshot or a storefile, not for the
@@ -1260,8 +1260,8 @@ public class Store implements HConstants
         byte [] lk = r.getLastKey();
         KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
         // if the midkey is the same as the first and last keys, then we cannot
-        // (ever) split this region. 
-        if (this.comparator.compareRows(mk, firstKey) == 0 && 
+        // (ever) split this region.
+        if (this.comparator.compareRows(mk, firstKey) == 0 &&
             this.comparator.compareRows(mk, lastKey) == 0) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("cannot split because midkey is the same as first or " +
@@ -1278,12 +1278,12 @@ public class Store implements HConstants
     }
     return null;
   }
-  
+
   /** @return aggregate size of HStore */
   public long getSize() {
     return storeSize;
   }
-  
+
   //////////////////////////////////////////////////////////////////////////////
   // File administration
   //////////////////////////////////////////////////////////////////////////////
@@ -1383,7 +1383,7 @@ public class Store implements HConstants
    * @param kv Key to find.
    * @return True if we were able to seek the scanner to <code>b</code> or to
    * the key just after.
-   * @throws IOException 
+   * @throws IOException
    */
   static boolean getClosest(final HFileScanner s, final KeyValue kv)
   throws IOException {
@@ -1404,15 +1404,15 @@ public class Store implements HConstants
     }
     return true;
   }
-  
+
   /**
    * Retrieve results from this store given the specified Get parameters.
    * @param get Get operation
    * @param columns List of columns to match, can be empty (not null)
-   * @param result List to add results to 
+   * @param result List to add results to
    * @throws IOException
    */
-  public void get(Get get, NavigableSet<byte[]> columns, List<KeyValue> result) 
+  public void get(Get get, NavigableSet<byte[]> columns, List<KeyValue> result)
   throws IOException {
     KeyComparator keyComparator = this.comparator.getRawComparator();
 
@@ -1426,12 +1426,12 @@ public class Store implements HConstants
         // Received early-out from memstore
         return;
       }
-    
+
       // Check if we even have storefiles
       if (this.storefiles.isEmpty()) {
         return;
       }
-    
+
       // Get storefiles for this store
       List<HFileScanner> storefileScanners = new ArrayList<HFileScanner>();
       for (StoreFile sf : this.storefiles.descendingMap().values()) {
@@ -1443,11 +1443,11 @@ public class Store implements HConstants
         // Get a scanner that caches the block and uses pread
         storefileScanners.add(r.getScanner(true, true));
       }
-    
+
       // StoreFileGetScan will handle reading this store's storefiles
       StoreFileGetScan scanner = new StoreFileGetScan(storefileScanners, matcher);
-    
-      // Run a GET scan and put results into the specified list 
+
+      // Run a GET scan and put results into the specified list
       scanner.get(result);
     } finally {
       this.lock.readLock().unlock();
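
The get() hunks above show the read path's shape: memstore first with a possible early-out, an early return when there are no storefiles, then scanners over the storefiles (descendingMap() suggests newest first, though that ordering is an inference here). A stand-in sketch of that flow:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class GetFlowSketch {
      interface Mem { boolean get(String row, List<String> result); } // true = early-out
      interface SFile { List<String> get(String row); }

      static void get(String row, Mem memstore, List<SFile> storefiles,
                      List<String> result) {
        if (memstore.get(row, result)) {
          return;                      // memstore alone satisfied the Get
        }
        if (storefiles.isEmpty()) {
          return;                      // nothing else to consult
        }
        for (SFile sf : storefiles) {  // assumed newest first, per descendingMap()
          result.addAll(sf.get(row));
        }
      }

      public static void main(String[] args) {
        Mem emptyMem = new Mem() {
          public boolean get(String row, List<String> result) { return false; }
        };
        SFile sf = new SFile() {
          public List<String> get(String row) {
            return Collections.singletonList(row + "/cf:q1");
          }
        };
        List<String> out = new ArrayList<String>();
        get("r1", emptyMem, Arrays.asList(sf), out);
        System.out.println(out); // [r1/cf:q1]
      }
    }
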
@@ -1521,17 +1521,17 @@ public class Store implements HConstants
   public boolean hasTooManyStoreFiles() {
     return this.storefiles.size() > this.compactionThreshold;
   }
-  
+
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT + (17 * ClassSize.REFERENCE) +
       (6 * Bytes.SIZEOF_LONG) + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN +
       ClassSize.align(ClassSize.ARRAY));
-  
+
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
-      ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + 
-      ClassSize.CONCURRENT_SKIPLISTMAP + 
+      ClassSize.OBJECT + ClassSize.REENTRANT_LOCK +
+      ClassSize.CONCURRENT_SKIPLISTMAP +
       ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT);
-      
+
   @Override
   public long heapSize() {
     return DEEP_OVERHEAD + this.memstore.heapSize();
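
The FIXED_OVERHEAD/DEEP_OVERHEAD constants above are built by summing the object header, reference, and primitive field sizes and rounding up to the JVM's 8-byte allocation unit. A sketch with illustrative sizes (the real numbers come from ClassSize):

    public class HeapSizeSketch {
      // Illustrative sizes only; ClassSize supplies the real, JVM-dependent values.
      static final int OBJECT = 16, REFERENCE = 8, ARRAY_HEADER = 16;
      static final int LONG = 8, INT = 4, BOOLEAN = 1;

      static long align(long num) {
        return (num + 7) & ~7L;  // round up to the 8-byte allocation unit
      }

      public static void main(String[] args) {
        // Mirrors the FIXED_OVERHEAD expression above with stand-in sizes.
        long fixed = align(OBJECT + (17 * REFERENCE) + (6 * LONG)
            + (3 * INT) + BOOLEAN + align(ARRAY_HEADER));
        System.out.println("FIXED_OVERHEAD ~ " + fixed + " bytes");
      }
    }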