Posted to commits@hbase.apache.org by st...@apache.org on 2010/05/07 21:26:51 UTC

svn commit: r942186 [12/18] - in /hadoop/hbase/trunk: ./ contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/ core/src/main/java/org/apache/hadoop/hbase/ core/src/main/java/org/apache/hadoop/hbase/client/ core/src/main/java/org/apache/...

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/util/Writables.java Fri May  7 19:26:45 2010
@@ -35,7 +35,7 @@ import java.io.IOException;
 public class Writables {
   /**
    * @param w writable
-   * @return The bytes of <code>w</code> gotten by running its 
+   * @return The bytes of <code>w</code> gotten by running its
    * {@link Writable#write(java.io.DataOutput)} method.
    * @throws IOException e
    * @see #getWritable(byte[], Writable)
@@ -118,7 +118,7 @@ public class Writables {
   throws IOException {
     return (HRegionInfo)getWritable(bytes, new HRegionInfo());
   }
- 
+
   /**
    * @param bytes serialized bytes
    * @return A HRegionInfo instance built out of passed <code>bytes</code>
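
For reference, the serialization round trip these helpers document, as a
hedged sketch assuming only the two methods shown above:

    // Serialize via Writable#write, then rehydrate a fresh instance
    // via Writable#readFields (both driven by the Writables helpers).
    HRegionInfo info = new HRegionInfo();
    byte [] serialized = Writables.getBytes(info);
    HRegionInfo copy =
      (HRegionInfo) Writables.getWritable(serialized, new HRegionInfo());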

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java Fri May  7 19:26:45 2010
@@ -103,7 +103,7 @@ public class ZooKeeperWrapper implements
       "master");
     String stateZNodeName = conf.get("zookeeper.znode.state",
     "shutdown");
-    
+
     rootRegionZNode = getZNode(parentZNode, rootServerZNodeName);
     rsZNode = getZNode(parentZNode, rsZNodeName);
     masterElectionZNode = getZNode(parentZNode, masterAddressZNodeName);
@@ -193,31 +193,31 @@ public class ZooKeeperWrapper implements
     }
     return sb.toString();
   }
-  
+
   /**
    * Gets the statistics from the given server. Uses a 1 minute timeout.
-   * 
+   *
    * @param server  The server to get the statistics from.
    * @return The array of response strings.
    * @throws IOException When the socket communication fails.
    */
-  public String[] getServerStats(String server) 
+  public String[] getServerStats(String server)
   throws IOException {
     return getServerStats(server, 60 * 1000);
   }
-  
+
   /**
    * Gets the statistics from the given server.
-   * 
+   *
    * @param server  The server to get the statistics from.
    * @param timeout  The socket timeout to use.
    * @return The array of response strings.
    * @throws IOException When the socket communication fails.
    */
-  public String[] getServerStats(String server, int timeout) 
+  public String[] getServerStats(String server, int timeout)
   throws IOException {
     String[] sp = server.split(":");
-    Socket socket = new Socket(sp[0], 
+    Socket socket = new Socket(sp[0],
       sp.length > 1 ? Integer.parseInt(sp[1]) : 2181);
     socket.setSoTimeout(timeout);
     PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
@@ -305,7 +305,7 @@ public class ZooKeeperWrapper implements
   public HServerAddress readMasterAddress(Watcher watcher) {
     return readAddress(masterElectionZNode, watcher);
   }
-  
+
   /**
    * Watch the state of the cluster, up or down
    * @param watcher Watcher to set on cluster state node
@@ -319,7 +319,7 @@ public class ZooKeeperWrapper implements
       LOG.warn("Failed to check on ZNode " + clusterStateZNode, e);
     }
   }
-  
+
   /**
    * Set the cluster state, up or down
    * @param up True to write the node, false to delete it
@@ -332,7 +332,7 @@ public class ZooKeeperWrapper implements
     try {
       if(up) {
         byte[] data = Bytes.toBytes("up");
-        zooKeeper.create(clusterStateZNode, data, 
+        zooKeeper.create(clusterStateZNode, data,
             Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
         LOG.debug("State node wrote in ZooKeeper");
       } else {
@@ -579,7 +579,7 @@ public class ZooKeeperWrapper implements
 
     return false;
   }
-  
+
   /**
   * Scans the region servers directory
    * @return A list of server addresses
@@ -587,7 +587,7 @@ public class ZooKeeperWrapper implements
   public List<HServerAddress> scanRSDirectory() {
     return scanAddressDirectory(rsZNode, null);
   }
-  
+
   /**
    * Method used to make sure the region server directory is empty.
    *
@@ -605,7 +605,7 @@ public class ZooKeeperWrapper implements
       LOG.warn("Failed to delete " + rsZNode + " znodes in ZooKeeper: " + e);
     }
   }
-  
+
   private boolean checkExistenceOf(String path) {
     Stat stat = null;
     try {
@@ -630,7 +630,7 @@ public class ZooKeeperWrapper implements
       LOG.warn("Failed to close connection with ZooKeeper");
     }
   }
-  
+
   public String getZNode(String parentZNode, String znodeName) {
     return znodeName.charAt(0) == ZNODE_PATH_SEPARATOR ?
         znodeName : joinPath(parentZNode, znodeName);
@@ -731,6 +731,6 @@ public class ZooKeeperWrapper implements
     return conf.get(ZOOKEEPER_QUORUM)+":"+
           conf.get(ZOOKEEPER_ZNODE_PARENT);
   }
-  
-  
+
+
 }
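
A usage note on the getServerStats pair above: the one-argument form
delegates to the two-argument form with a 60 second timeout, and a server
string without a port falls back to 2181. A sketch, where the quorum member
address is illustrative and wrapper is an existing ZooKeeperWrapper:

    String[] stats = wrapper.getServerStats("zk1.example.org");
    // Explicit port and a 30 second socket timeout:
    String[] more = wrapper.getServerStats("zk1.example.org:2181", 30 * 1000);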

Modified: hadoop/hbase/trunk/core/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/resources/hbase-default.xml?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/resources/hbase-default.xml (original)
+++ hadoop/hbase/trunk/core/src/main/resources/hbase-default.xml Fri May  7 19:26:45 2010
@@ -66,7 +66,7 @@
     <value>2097152</value>
     <description>Size of the write buffer in bytes. A bigger buffer takes more
     memory -- on both the client and server side since server instantiates
-    the passed write buffer to process it -- but reduces the number of RPC.  
+    the passed write buffer to process it -- but reduces the number of RPC.
     For an estimate of server-side memory-used, evaluate
     hbase.client.write.buffer * hbase.regionserver.handler.count
     </description>
@@ -143,7 +143,7 @@
     instance. This is to set an upper boundary for a single entry saved in a
    storage file. Since a single entry cannot be split, this helps avoid a
    region becoming unsplittable because its data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero 
+    to set this to a fraction of the maximum region size. Setting it to zero
     or less disables the check.
     </description>
   </property>
@@ -231,7 +231,7 @@
   <property>
     <name>hbase.regionserver.dns.interface</name>
     <value>default</value>
-    <description>The name of the Network Interface from which a region server 
+    <description>The name of the Network Interface from which a region server
       should report its IP address.
     </description>
   </property>
@@ -246,7 +246,7 @@
   <property>
     <name>hbase.master.dns.interface</name>
     <value>default</value>
-    <description>The name of the Network Interface from which a master 
+    <description>The name of the Network Interface from which a master
       should report its IP address.
     </description>
   </property>
@@ -254,14 +254,14 @@
     <name>hbase.master.dns.nameserver</name>
     <value>default</value>
     <description>The host name or IP address of the name server (DNS)
-      which a master should use to determine the host name used 
+      which a master should use to determine the host name used
       for communication and display purposes.
     </description>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.upperLimit</name>
     <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new 
+    <description>Maximum size of all memstores in a region server before new
       updates are blocked and flushes are forced. Defaults to 40% of heap
     </description>
   </property>
@@ -269,12 +269,12 @@
     <name>hbase.regionserver.global.memstore.lowerLimit</name>
     <value>0.35</value>
     <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 30% of heap. 
+      memory, keep flushing until we hit this mark. Defaults to 30% of heap.
       This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to 
+      the minimum possible flushing to occur when updates are blocked due to
       memstore limiting.
     </description>
-  </property>  
+  </property>
   <property>
     <name>hbase.hbasemaster.maxregionopen</name>
     <value>120000</value>
@@ -359,7 +359,7 @@
     During a compaction, updates cannot be flushed to disk.  Long
     compactions require memory sufficient to carry the logging of
     all updates across the duration of the compaction.
-    
+
     If too large, clients timeout during compaction.
     </description>
   </property>
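
As a worked example of the hbase.client.write.buffer estimate above: with
the default buffer of 2097152 bytes (2 MB) and a hypothetical
hbase.regionserver.handler.count of 10, server-side memory used works out
to roughly 2097152 * 10 = 20971520 bytes, about 20 MB.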

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/AbstractMergeTestBase.java Fri May  7 19:26:45 2010
@@ -39,27 +39,27 @@ public abstract class AbstractMergeTestB
   protected HTableDescriptor desc;
   protected ImmutableBytesWritable value;
   protected boolean startMiniHBase;
-  
+
   public AbstractMergeTestBase() {
     this(true);
   }
-  
-  /** constructor 
+
+  /** constructor
    * @param startMiniHBase
    */
   public AbstractMergeTestBase(boolean startMiniHBase) {
     super();
-    
+
     this.startMiniHBase = startMiniHBase;
-    
+
     // We will use the same value for the rows as that is not really important here
-    
+
     String partialValue = String.valueOf(System.currentTimeMillis());
     StringBuilder val = new StringBuilder();
     while(val.length() < 1024) {
       val.append(partialValue);
     }
- 
+
     try {
       value = new ImmutableBytesWritable(
           val.toString().getBytes(HConstants.UTF8_ENCODING));
@@ -81,10 +81,10 @@ public abstract class AbstractMergeTestB
   public void preHBaseClusterSetup() throws Exception {
     conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);
 
-    // We create three data regions: The first is too large to merge since it 
-    // will be > 64 MB in size. The second two will be smaller and will be 
+    // We create three data regions: The first is too large to merge since it
+    // will be > 64 MB in size. The second two will be smaller and will be
     // selected for merging.
-    
+
     // To ensure that the first region is larger than 64MB we need to write at
     // least 65536 rows. We will make certain by writing 70000
 
@@ -103,12 +103,12 @@ public abstract class AbstractMergeTestB
       createAregion(row_70001, row_80001, 70001, 10000),
       createAregion(row_80001, null, 80001, 11000)
     };
-    
+
     // Now create the root and meta regions and insert the data regions
     // created above into the meta
 
     createRootAndMetaRegions();
-    
+
     for(int i = 0; i < regions.length; i++) {
       HRegion.addRegionToMETA(meta, regions[i]);
     }
@@ -118,9 +118,9 @@ public abstract class AbstractMergeTestB
 
   private HRegion createAregion(byte [] startKey, byte [] endKey, int firstRow,
       int nrows) throws IOException {
-    
+
     HRegion region = createNewHRegion(desc, startKey, endKey);
-    
+
     System.out.println("created region " +
         Bytes.toString(region.getRegionName()));
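
The row counts above follow from the ~1 KB values the constructor builds:
65536 rows * 1024 bytes = 64 MB exactly, so 70000 rows comfortably exceeds
the 64 MB hbase.hregion.max.filesize set in preHBaseClusterSetup, while the
10000- and 11000-row regions stay well under it and remain merge candidates.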
 

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java Fri May  7 19:26:45 2010
@@ -51,22 +51,22 @@ public abstract class HBaseClusterTestCa
   public HBaseClusterTestCase() {
     this(1);
   }
-  
+
   /**
    * Start a MiniHBaseCluster with regionServers region servers in-process to
    * start with. Also, start a MiniDfsCluster before starting the hbase cluster.
    * The configuration used will be edited so that this works correctly.
    * @param regionServers number of region servers to start.
-   */  
+   */
   public HBaseClusterTestCase(int regionServers) {
     this(regionServers, true);
   }
-  
+
  /** Start a MiniHBaseCluster with regionServers region servers in-process to
    * start with. Optionally, startDfs indicates if a MiniDFSCluster should be
    * started. If startDfs is false, the assumption is that an external DFS is
    * configured in hbase-site.xml and is already started, or you have started a
-   * MiniDFSCluster on your own and edited the configuration in memory. (You 
+   * MiniDFSCluster on your own and edited the configuration in memory. (You
    * can modify the config used by overriding the preHBaseClusterSetup method.)
    * @param regionServers number of region servers to start.
    * @param startDfs set to true if MiniDFS should be started
@@ -83,12 +83,12 @@ public abstract class HBaseClusterTestCa
 
   /**
    * Subclass hook.
-   * 
+   *
    * Run after dfs is ready but before hbase cluster is started up.
    */
   protected void preHBaseClusterSetup() throws Exception {
     // continue
-  } 
+  }
 
   /**
    * Actually start the MiniHBase instance.
@@ -110,13 +110,13 @@ public abstract class HBaseClusterTestCa
       new HTable(conf, HConstants.META_TABLE_NAME);
     }
   }
-  
+
   /**
    * Run after hbase cluster is started up.
    */
   protected void postHBaseClusterSetup() throws Exception {
     // continue
-  } 
+  }
 
   @Override
   protected void setUp() throws Exception {
@@ -139,9 +139,9 @@ public abstract class HBaseClusterTestCa
       // do the super setup now. if we had done it first, then we would have
       // gotten our conf all mangled and a local fs started up.
       super.setUp();
-    
+
       // run the pre-cluster setup
-      preHBaseClusterSetup();    
+      preHBaseClusterSetup();
 
       // start the instance
       hBaseClusterSetup();
@@ -194,7 +194,7 @@ public abstract class HBaseClusterTestCa
     //  "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName());
   }
 
-  
+
   /**
   * Use this utility method to debug why the cluster won't go down.  On a
    * period it throws a thread dump.  Method ends when all cluster
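
A minimal sketch of how a subclass plugs into the lifecycle hooks above
(the class name is illustrative; conf is the HBaseConfiguration field
inherited from HBaseTestCase):

    public class MyClusterTest extends HBaseClusterTestCase {
      public MyClusterTest() {
        super(1);  // one region server, MiniDFSCluster started by default
      }

      @Override
      protected void preHBaseClusterSetup() throws Exception {
        // Runs after dfs is up but before the hbase cluster starts, so
        // configuration edits made here are seen by the cluster.
        conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);
      }
    }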

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java Fri May  7 19:26:45 2010
@@ -72,11 +72,11 @@ public abstract class HBaseTestCase exte
   protected static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
   protected String START_KEY;
   protected static final int MAXVERSIONS = 3;
-  
+
   static {
     initialize();
   }
-  
+
   public volatile HBaseConfiguration conf;
 
   /** constructor */
@@ -84,7 +84,7 @@ public abstract class HBaseTestCase exte
     super();
     init();
   }
-  
+
   /**
    * @param name
    */
@@ -92,7 +92,7 @@ public abstract class HBaseTestCase exte
     super(name);
     init();
   }
-  
+
   private void init() {
     conf = new HBaseConfiguration();
     try {
@@ -102,7 +102,7 @@ public abstract class HBaseTestCase exte
       fail();
     }
   }
-  
+
   /**
    * Note that this method must be called after the mini hdfs cluster has
    * started or we end up with a local file system.
@@ -131,7 +131,7 @@ public abstract class HBaseTestCase exte
       throw e;
     }
   }
-  
+
   @Override
   protected void tearDown() throws Exception {
     try {
@@ -158,11 +158,11 @@ public abstract class HBaseTestCase exte
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
     filesystem.mkdirs(rootdir);
-    
+
     return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
         rootdir, conf);
   }
-  
+
   protected HRegion openClosedRegion(final HRegion closedRegion)
   throws IOException {
     HRegion r = new HRegion(closedRegion.getBaseDir(), closedRegion.getLog(),
@@ -171,7 +171,7 @@ public abstract class HBaseTestCase exte
     r.initialize(null, null);
     return r;
   }
-  
+
   /**
    * Create a table of name <code>name</code> with {@link COLUMNS} for
    * families.
@@ -181,7 +181,7 @@ public abstract class HBaseTestCase exte
   protected HTableDescriptor createTableDescriptor(final String name) {
     return createTableDescriptor(name, MAXVERSIONS);
   }
-  
+
   /**
    * Create a table of name <code>name</code> with {@link COLUMNS} for
    * families.
@@ -204,7 +204,7 @@ public abstract class HBaseTestCase exte
         false, HConstants.REPLICATION_SCOPE_LOCAL));
     return htd;
   }
-  
+
   /**
    * Add content to region <code>r</code> on the passed column
    * <code>column</code>.
@@ -261,13 +261,13 @@ public abstract class HBaseTestCase exte
   throws IOException {
     return addContent(updater, columnFamily, null, startKeyBytes, endKey, -1);
   }
-  
+
   protected static long addContent(final Incommon updater, final String family,
                                    final String column, final byte [] startKeyBytes,
                                    final byte [] endKey) throws IOException {
     return addContent(updater, family, column, startKeyBytes, endKey, -1);
   }
-  
+
   /**
    * Add content to region <code>r</code> on the passed column
    * <code>column</code>.
@@ -337,7 +337,7 @@ public abstract class HBaseTestCase exte
             } catch (IOException ex) {
               ex.printStackTrace();
               throw ex;
-            } 
+            }
           } catch (RuntimeException ex) {
             ex.printStackTrace();
             throw ex;
@@ -353,7 +353,7 @@ public abstract class HBaseTestCase exte
     }
     return count;
   }
-  
+
   /**
    * Implementors can flushcache.
    */
@@ -363,16 +363,16 @@ public abstract class HBaseTestCase exte
      */
     public void flushcache() throws IOException;
   }
-  
+
   /**
    * Interface used by tests so can do common operations against an HTable
    * or an HRegion.
-   * 
+   *
   * TODO: Come up w/ a better name for this interface.
    */
   public static interface Incommon {
     /**
-     * 
+     *
      * @param delete
      * @param lockid
      * @param writeToWAL
@@ -388,7 +388,7 @@ public abstract class HBaseTestCase exte
     public void put(Put put) throws IOException;
 
     public Result get(Get get) throws IOException;
-    
+
     /**
      * @param family
      * @param qualifiers
@@ -401,35 +401,35 @@ public abstract class HBaseTestCase exte
         byte [] firstRow, long ts)
     throws IOException;
   }
-  
+
   /**
    * A class that makes a {@link Incommon} out of a {@link HRegion}
    */
   public static class HRegionIncommon implements Incommon, FlushCache {
     final HRegion region;
-    
+
     /**
      * @param HRegion
      */
     public HRegionIncommon(final HRegion HRegion) {
       this.region = HRegion;
     }
-    
+
     public void put(Put put) throws IOException {
       region.put(put);
     }
-    
+
     public void delete(Delete delete,  Integer lockid, boolean writeToWAL)
     throws IOException {
       this.region.delete(delete, lockid, writeToWAL);
     }
-    
+
     public Result get(Get get) throws IOException {
       return region.get(get, null);
     }
-    
+
     public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
-        byte [] firstRow, long ts) 
+        byte [] firstRow, long ts)
       throws IOException {
         Scan scan = new Scan(firstRow);
         if(qualifiers == null || qualifiers.length == 0) {
@@ -440,14 +440,14 @@ public abstract class HBaseTestCase exte
           }
         }
         scan.setTimeRange(0, ts);
-        return new 
+        return new
           InternalScannerIncommon(region.getScanner(scan));
       }
-    
+
     public Result get(Get get, Integer lockid) throws IOException{
       return this.region.get(get, lockid);
     }
-    
+
 
     public void flushcache() throws IOException {
       this.region.flushcache();
@@ -467,23 +467,23 @@ public abstract class HBaseTestCase exte
       super();
       this.table = table;
     }
-    
+
     public void put(Put put) throws IOException {
       table.put(put);
     }
-    
-    
+
+
     public void delete(Delete delete,  Integer lockid, boolean writeToWAL)
     throws IOException {
       this.table.delete(delete);
     }
-    
+
     public Result get(Get get) throws IOException {
       return table.get(get);
     }
 
     public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
-        byte [] firstRow, long ts) 
+        byte [] firstRow, long ts)
       throws IOException {
       Scan scan = new Scan(firstRow);
       if(qualifiers == null || qualifiers.length == 0) {
@@ -494,25 +494,25 @@ public abstract class HBaseTestCase exte
         }
       }
       scan.setTimeRange(0, ts);
-      return new 
+      return new
         ClientScannerIncommon(table.getScanner(scan));
     }
   }
-  
-  public interface ScannerIncommon 
+
+  public interface ScannerIncommon
   extends Iterable<Result> {
     public boolean next(List<KeyValue> values)
     throws IOException;
-    
+
     public void close() throws IOException;
   }
-  
+
   public static class ClientScannerIncommon implements ScannerIncommon {
     ResultScanner scanner;
     public ClientScannerIncommon(ResultScanner scanner) {
       this.scanner = scanner;
     }
-    
+
     public boolean next(List<KeyValue> values)
     throws IOException {
       Result results = scanner.next();
@@ -523,38 +523,38 @@ public abstract class HBaseTestCase exte
       values.addAll(results.list());
       return true;
     }
-    
+
     public void close() throws IOException {
       scanner.close();
     }
-    
+
     @SuppressWarnings("unchecked")
     public Iterator iterator() {
       return scanner.iterator();
     }
   }
-  
+
   public static class InternalScannerIncommon implements ScannerIncommon {
     InternalScanner scanner;
-    
+
     public InternalScannerIncommon(InternalScanner scanner) {
       this.scanner = scanner;
     }
-    
+
     public boolean next(List<KeyValue> results)
     throws IOException {
       return scanner.next(results);
     }
-    
+
     public void close() throws IOException {
       scanner.close();
     }
-    
+
     public Iterator<Result> iterator() {
       throw new UnsupportedOperationException();
     }
   }
-  
+
 //  protected void assertCellEquals(final HRegion region, final byte [] row,
 //    final byte [] column, final long timestamp, final String value)
 //  throws IOException {
@@ -565,11 +565,11 @@ public abstract class HBaseTestCase exte
 //        cell_value);
 //    } else {
 //      if (cell_value == null) {
-//        fail(Bytes.toString(column) + " at timestamp " + timestamp + 
+//        fail(Bytes.toString(column) + " at timestamp " + timestamp +
 //          "\" was expected to be \"" + value + " but was null");
 //      }
 //      if (cell_value != null) {
-//        assertEquals(Bytes.toString(column) + " at timestamp " 
+//        assertEquals(Bytes.toString(column) + " at timestamp "
 //            + timestamp, value, new String(cell_value.getValue()));
 //      }
 //    }
@@ -582,30 +582,30 @@ public abstract class HBaseTestCase exte
       Get get = new Get(row);
       get.setTimeStamp(timestamp);
       Result res = region.get(get, null);
-      NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = 
+      NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map =
         res.getMap();
       byte [] res_value = map.get(family).get(qualifier).get(timestamp);
-    
+
       if (value == null) {
         assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
             " at timestamp " + timestamp, null, res_value);
       } else {
         if (res_value == null) {
-          fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) + 
-              " at timestamp " + timestamp + "\" was expected to be \"" + 
+          fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
+              " at timestamp " + timestamp + "\" was expected to be \"" +
               Bytes.toStringBinary(value) + " but was null");
         }
         if (res_value != null) {
           assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
-              " at timestamp " + 
+              " at timestamp " +
               timestamp, value, new String(res_value));
         }
       }
     }
-  
+
   /**
    * Initializes parameters used in the test environment:
-   * 
+   *
    * Sets the configuration parameter TEST_DIRECTORY_KEY if not already set.
    * Sets the boolean debugging if "DEBUGGING" is set in the environment.
    * If debugging is enabled, reconfigures logging so that the root log level is
@@ -620,7 +620,7 @@ public abstract class HBaseTestCase exte
 
   /**
    * Common method to close down a MiniDFSCluster and the associated file system
-   * 
+   *
    * @param cluster
    */
   public static void shutdownDfs(MiniDFSCluster cluster) {
@@ -645,14 +645,14 @@ public abstract class HBaseTestCase exte
       }
     }
   }
-  
+
   protected void createRootAndMetaRegions() throws IOException {
     root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
-    meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, 
+    meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
         conf);
     HRegion.addRegionToMETA(root, meta);
   }
-  
+
   protected void closeRootAndMeta() throws IOException {
     if (meta != null) {
       meta.close();
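
A sketch of the Incommon abstraction above in use, letting one test body
drive either an HRegion or an HTable; the row, family, and qualifier names
are illustrative, and the family is assumed to exist in the descriptor:

    HRegionIncommon incommon = new HRegionIncommon(region);
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), Bytes.toBytes("v"));
    incommon.put(put);
    incommon.flushcache();  // HRegionIncommon also implements FlushCache
    Result result = incommon.get(new Get(Bytes.toBytes("row1")));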

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Fri May  7 19:26:45 2010
@@ -138,7 +138,7 @@ public class HBaseTestingUtility {
 
   /**
    * Start up a minicluster of hbase, dfs, and zookeeper.
-   * @throws Exception 
+   * @throws Exception
    */
   public void startMiniCluster() throws Exception {
     startMiniCluster(1);
@@ -148,7 +148,7 @@ public class HBaseTestingUtility {
    * Call this if you only want a zk cluster.
    * @see #startMiniZKCluster() if you want zk + dfs + hbase mini cluster.
    * @throws Exception
-   * @see #shutdownMiniZKCluster() 
+   * @see #shutdownMiniZKCluster()
    */
   public void startMiniZKCluster() throws Exception {
     isRunningCluster();
@@ -169,7 +169,7 @@ public class HBaseTestingUtility {
    * @see #startMiniZKCluster()
    */
   public void shutdownMiniZKCluster() throws IOException {
-    if (this.zkCluster != null) this.zkCluster.shutdown(); 
+    if (this.zkCluster != null) this.zkCluster.shutdown();
   }
 
   /**
@@ -203,7 +203,7 @@ public class HBaseTestingUtility {
     // the TEST_DIRECTORY_KEY to make bad blocks, a feature we are not using,
     // but otherwise, just in constructor.
     System.setProperty(TEST_DIRECTORY_KEY, oldBuildTestDir);
- 
+
     // Mangle conf so fs parameter points to minidfs we just started up
     FileSystem fs = this.dfsCluster.getFileSystem();
     this.conf.set("fs.defaultFS", fs.getUri().toString());
@@ -279,7 +279,7 @@ public class HBaseTestingUtility {
    * @return An HTable instance for the created table.
    * @throws IOException
    */
-  public HTable createTable(byte[] tableName, byte[] family) 
+  public HTable createTable(byte[] tableName, byte[] family)
   throws IOException{
     return createTable(tableName, new byte[][]{family});
   }
@@ -291,7 +291,7 @@ public class HBaseTestingUtility {
    * @return An HTable instance for the created table.
    * @throws IOException
    */
-  public HTable createTable(byte[] tableName, byte[][] families) 
+  public HTable createTable(byte[] tableName, byte[][] families)
   throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for(byte[] family : families) {
@@ -408,16 +408,16 @@ public class HBaseTestingUtility {
     }
     return rowCount;
   }
-  
+
   /**
   * Creates many regions named "aaa" to "zzz".
-   * 
+   *
    * @param table  The table to use for the data.
    * @param columnFamily  The family to insert the data into.
    * @return count of regions created.
    * @throws IOException When creating the regions fails.
    */
-  public int createMultiRegions(HTable table, byte[] columnFamily) 
+  public int createMultiRegions(HTable table, byte[] columnFamily)
   throws IOException {
     return createMultiRegions(getConfiguration(), table, columnFamily);
   }
@@ -431,11 +431,11 @@ public class HBaseTestingUtility {
    * @throws IOException When creating the regions fails.
    */
   public int createMultiRegions(final Configuration c, final HTable table,
-      final byte[] columnFamily) 
+      final byte[] columnFamily)
   throws IOException {
     byte[][] KEYS = {
       HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
-      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), 
+      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
       Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
       Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
       Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
@@ -452,18 +452,18 @@ public class HBaseTestingUtility {
       htd.addFamily(hcd);
     }
     // remove empty region - this is tricky as the mini cluster during the test
-    // setup already has the "<tablename>,,123456789" row with an empty start 
-    // and end key. Adding the custom regions below adds those blindly, 
-    // including the new start region from empty to "bbb". lg 
+    // setup already has the "<tablename>,,123456789" row with an empty start
+    // and end key. Adding the custom regions below adds those blindly,
+    // including the new start region from empty to "bbb". lg
     List<byte[]> rows = getMetaTableRows();
     // add custom ones
     int count = 0;
     for (int i = 0; i < KEYS.length; i++) {
       int j = (i + 1) % KEYS.length;
-      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(), 
+      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
         KEYS[i], KEYS[j]);
       Put put = new Put(hri.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, 
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
         Writables.getBytes(hri));
       meta.put(put);
       LOG.info("createMultiRegions: inserted " + hri.toString());
@@ -471,7 +471,7 @@ public class HBaseTestingUtility {
     }
     // see comment above, remove "old" (or previous) single region
     for (byte[] row : rows) {
-      LOG.info("createMultiRegions: deleting meta row -> " + 
+      LOG.info("createMultiRegions: deleting meta row -> " +
         Bytes.toStringBinary(row));
       meta.delete(new Delete(row));
     }
@@ -491,7 +491,7 @@ public class HBaseTestingUtility {
     List<byte[]> rows = new ArrayList<byte[]>();
     ResultScanner s = t.getScanner(new Scan());
     for (Result result : s) {
-      LOG.info("getMetaTableRows: row -> " + 
+      LOG.info("getMetaTableRows: row -> " +
         Bytes.toStringBinary(result.getRow()));
       rows.add(result.getRow());
     }
@@ -509,7 +509,7 @@ public class HBaseTestingUtility {
     ArrayList<Delete> deletes = new ArrayList<Delete>();
     ResultScanner s = t.getScanner(new Scan());
     for (Result result : s) {
-      LOG.info("emptyMetaTable: remove row -> " + 
+      LOG.info("emptyMetaTable: remove row -> " +
         Bytes.toStringBinary(result.getRow()));
       Delete del = new Delete(result.getRow());
       deletes.add(del);
@@ -517,9 +517,9 @@ public class HBaseTestingUtility {
     s.close();
     t.delete(deletes);
   }
-  
+
   /**
-   * Starts a <code>MiniMRCluster</code> with a default number of 
+   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>s.
    *
    * @throws IOException When starting the cluster fails.
@@ -527,7 +527,7 @@ public class HBaseTestingUtility {
   public void startMiniMapReduceCluster() throws IOException {
     startMiniMapReduceCluster(2);
   }
-  
+
   /**
    * Starts a <code>MiniMRCluster</code>.
    *
@@ -540,13 +540,13 @@ public class HBaseTestingUtility {
     Configuration c = getConfiguration();
     System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
     c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
-    mrCluster = new MiniMRCluster(servers, 
+    mrCluster = new MiniMRCluster(servers,
       FileSystem.get(c).getUri().toString(), 1);
     LOG.info("Mini mapreduce cluster started");
   }
-  
+
   /**
-   * Stops the previously started <code>MiniMRCluster</code>. 
+   * Stops the previously started <code>MiniMRCluster</code>.
    */
   public void shutdownMiniMapReduceCluster() {
     LOG.info("Stopping mini mapreduce cluster...");
@@ -608,13 +608,13 @@ public class HBaseTestingUtility {
 
   /**
    * Get the HBase cluster.
-   * 
+   *
    * @return hbase cluster
    */
   public MiniHBaseCluster getHBaseCluster() {
     return hbaseCluster;
   }
-  
+
   /**
    * Returns a HBaseAdmin instance.
    *
@@ -627,9 +627,9 @@ public class HBaseTestingUtility {
     }
     return hbaseAdmin;
   }
-  
+
   /**
-   * Closes the named region. 
+   * Closes the named region.
    *
    * @param regionName  The region to close.
    * @throws IOException
@@ -637,9 +637,9 @@ public class HBaseTestingUtility {
   public void closeRegion(String regionName) throws IOException {
     closeRegion(Bytes.toBytes(regionName));
   }
-  
+
   /**
-   * Closes the named region. 
+   * Closes the named region.
    *
    * @param regionName  The region to close.
    * @throws IOException
@@ -648,9 +648,9 @@ public class HBaseTestingUtility {
     HBaseAdmin admin = getHBaseAdmin();
     admin.closeRegion(regionName, (Object[]) null);
   }
-  
+
   /**
-   * Closes the region containing the given row. 
+   * Closes the region containing the given row.
    *
    * @param row  The row to find the containing region.
    * @param table  The table to find the region.
@@ -661,7 +661,7 @@ public class HBaseTestingUtility {
   }
 
   /**
-   * Closes the region containing the given row. 
+   * Closes the region containing the given row.
    *
    * @param row  The row to find the containing region.
    * @param table  The table to find the region.
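
Taken together, the methods above cover the usual test bootstrap. A hedged
sketch (table and family names are illustrative, and a no-argument
constructor is assumed):

    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();  // zookeeper + dfs + hbase
    HTable table = util.createTable(Bytes.toBytes("test"),
      Bytes.toBytes("family"));
    // Pre-split on the "bbb".."yyy"-style keys shown above.
    int regionCount = util.createMultiRegions(table, Bytes.toBytes("family"));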

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Fri May  7 19:26:45 2010
@@ -42,14 +42,14 @@ import org.apache.hadoop.hbase.util.Byte
  * </p>
  */
 public class HFilePerformanceEvaluation {
-  
+
   private static final int ROW_LENGTH = 10;
   private static final int ROW_COUNT = 1000000;
   private static final int RFILE_BLOCKSIZE = 8 * 1024;
-  
+
   static final Log LOG =
     LogFactory.getLog(HFilePerformanceEvaluation.class.getName());
-  
+
   static byte [] format(final int i) {
     String v = Integer.toString(i);
     return Bytes.toBytes("0000000000".substring(v.length()) + v);
@@ -110,9 +110,9 @@ public class HFilePerformanceEvaluation 
         }
       }
     });
-    
+
   }
-  
+
   protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount)
     throws Exception {
     LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " +
@@ -121,14 +121,14 @@ public class HFilePerformanceEvaluation 
     LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " +
         rowCount + " rows took " + elapsedTime + "ms.");
   }
-  
+
   static abstract class RowOrientedBenchmark {
-    
+
     protected final Configuration conf;
     protected final FileSystem fs;
     protected final Path mf;
     protected final int totalRows;
-    
+
     public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
       this.conf = conf;
@@ -136,21 +136,21 @@ public class HFilePerformanceEvaluation 
       this.mf = mf;
       this.totalRows = totalRows;
     }
-    
+
     void setUp() throws Exception {
       // do nothing
     }
-    
+
     abstract void doRow(int i) throws Exception;
-    
+
     protected int getReportingPeriod() {
       return this.totalRows / 10;
     }
-    
+
     void tearDown() throws Exception {
       // do nothing
     }
-    
+
     /**
      * Run benchmark
      * @return elapsed time.
@@ -173,76 +173,76 @@ public class HFilePerformanceEvaluation 
       }
       return elapsedTime;
     }
-    
+
   }
-  
+
   static class SequentialWriteBenchmark extends RowOrientedBenchmark {
     protected HFile.Writer writer;
     private Random random = new Random();
     private byte[] bytes = new byte[ROW_LENGTH];
-    
+
     public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
       super(conf, fs, mf, totalRows);
     }
-    
+
     @Override
     void setUp() throws Exception {
       writer = new HFile.Writer(this.fs, this.mf, RFILE_BLOCKSIZE, (Compression.Algorithm) null, null);
     }
-    
+
     @Override
     void doRow(int i) throws Exception {
-      writer.append(format(i), generateValue()); 
+      writer.append(format(i), generateValue());
     }
-    
+
     private byte[] generateValue() {
       random.nextBytes(bytes);
       return bytes;
     }
-    
+
     @Override
     protected int getReportingPeriod() {
       return this.totalRows; // don't report progress
     }
-    
+
     @Override
     void tearDown() throws Exception {
       writer.close();
     }
-    
+
   }
-  
+
   static abstract class ReadBenchmark extends RowOrientedBenchmark {
-    
+
     protected HFile.Reader reader;
-    
+
     public ReadBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
       super(conf, fs, mf, totalRows);
     }
-    
+
     @Override
     void setUp() throws Exception {
       reader = new HFile.Reader(this.fs, this.mf, null, false);
       this.reader.loadFileInfo();
     }
-    
+
     @Override
     void tearDown() throws Exception {
       reader.close();
     }
-    
+
   }
 
   static class SequentialReadBenchmark extends ReadBenchmark {
     private HFileScanner scanner;
-    
+
     public SequentialReadBenchmark(Configuration conf, FileSystem fs,
       Path mf, int totalRows) {
       super(conf, fs, mf, totalRows);
     }
-    
+
     @Override
     void setUp() throws Exception {
       super.setUp();
@@ -259,16 +259,16 @@ public class HFilePerformanceEvaluation 
         PerformanceEvaluationCommons.assertValueSize(v.limit(), ROW_LENGTH);
       }
     }
-    
+
     @Override
     protected int getReportingPeriod() {
       return this.totalRows; // don't report progress
     }
-    
+
   }
-  
+
   static class UniformRandomReadBenchmark extends ReadBenchmark {
-    
+
     private Random random = new Random();
 
     public UniformRandomReadBenchmark(Configuration conf, FileSystem fs,
@@ -286,12 +286,12 @@ public class HFilePerformanceEvaluation 
       ByteBuffer v = scanner.getValue();
       PerformanceEvaluationCommons.assertValueSize(v.limit(), ROW_LENGTH);
     }
-    
+
     private byte [] getRandomRow() {
       return format(random.nextInt(totalRows));
     }
   }
-  
+
   static class UniformRandomSmallScan extends ReadBenchmark {
     private Random random = new Random();
 
@@ -319,14 +319,14 @@ public class HFilePerformanceEvaluation 
         PerformanceEvaluationCommons.assertValueSize(v.limit(), ROW_LENGTH);
       }
     }
-    
+
     private byte [] getRandomRow() {
       return format(random.nextInt(totalRows));
     }
   }
-  
+
   static class GaussianRandomReadBenchmark extends ReadBenchmark {
-    
+
     private RandomData randomData = new RandomDataImpl();
 
     public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs,
@@ -353,11 +353,11 @@ public class HFilePerformanceEvaluation 
       return format(r);
     }
   }
-  
+
   /**
    * @param args
-   * @throws Exception 
-   * @throws IOException 
+   * @throws Exception
+   * @throws IOException
    */
   public static void main(String[] args) throws Exception {
     new HFilePerformanceEvaluation().runBenchmarks();
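
As a small worked example of the format() helper above: for i = 42,
"0000000000".substring(2) leaves eight zeros, so the row key is the
fixed-width string "0000000042"; zero padding keeps the lexicographic sort
order of keys equal to their numeric order.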

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java Fri May  7 19:26:45 2010
@@ -43,7 +43,7 @@ public class MapFilePerformanceEvaluatio
   protected final HBaseConfiguration conf;
   private static final int ROW_LENGTH = 10;
   private static final int ROW_COUNT = 100000;
-  
+
   static final Log LOG =
     LogFactory.getLog(MapFilePerformanceEvaluation.class.getName());
 
@@ -54,7 +54,7 @@ public class MapFilePerformanceEvaluatio
     super();
     this.conf = c;
   }
-  
+
   static ImmutableBytesWritable format(final int i, ImmutableBytesWritable w) {
     String v = Integer.toString(i);
     w.set(Bytes.toBytes("0000000000".substring(v.length()) + v));
@@ -69,7 +69,7 @@ public class MapFilePerformanceEvaluatio
     }
     runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT),
         ROW_COUNT);
-    
+
     PerformanceEvaluationCommons.concurrentReads(new Runnable() {
       public void run() {
         try {
@@ -111,7 +111,7 @@ public class MapFilePerformanceEvaluatio
       }
     });
   }
-  
+
   protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount)
     throws Exception {
     LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " +
@@ -120,14 +120,14 @@ public class MapFilePerformanceEvaluatio
     LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " +
         rowCount + " rows took " + elapsedTime + "ms.");
   }
-  
+
   static abstract class RowOrientedBenchmark {
-    
+
     protected final Configuration conf;
     protected final FileSystem fs;
     protected final Path mf;
     protected final int totalRows;
-    
+
     public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
       this.conf = conf;
@@ -135,21 +135,21 @@ public class MapFilePerformanceEvaluatio
       this.mf = mf;
       this.totalRows = totalRows;
     }
-    
+
     void setUp() throws Exception {
       // do nothing
     }
-    
+
     abstract void doRow(int i) throws Exception;
-    
+
     protected int getReportingPeriod() {
       return this.totalRows / 10;
     }
-    
+
     void tearDown() throws Exception {
       // do nothing
     }
-    
+
     /**
      * Run benchmark
      * @return elapsed time.
@@ -172,77 +172,77 @@ public class MapFilePerformanceEvaluatio
       }
       return elapsedTime;
     }
-    
+
   }
-  
+
   static class SequentialWriteBenchmark extends RowOrientedBenchmark {
-    
+
     protected MapFile.Writer writer;
     private Random random = new Random();
     private byte[] bytes = new byte[ROW_LENGTH];
     private ImmutableBytesWritable key = new ImmutableBytesWritable();
     private ImmutableBytesWritable value = new ImmutableBytesWritable();
-    
+
     public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
       super(conf, fs, mf, totalRows);
     }
-    
+
     @Override
     void setUp() throws Exception {
       writer = new MapFile.Writer(conf, fs, mf.toString(),
         ImmutableBytesWritable.class, ImmutableBytesWritable.class);
     }
-    
+
     @Override
     void doRow(int i) throws Exception {
       value.set(generateValue());
-      writer.append(format(i, key), value); 
+      writer.append(format(i, key), value);
     }
-    
+
     private byte[] generateValue() {
       random.nextBytes(bytes);
       return bytes;
     }
-    
+
     @Override
     protected int getReportingPeriod() {
       return this.totalRows; // don't report progress
     }
-    
+
     @Override
     void tearDown() throws Exception {
       writer.close();
     }
-    
+
   }
-  
+
   static abstract class ReadBenchmark extends RowOrientedBenchmark {
     ImmutableBytesWritable key = new ImmutableBytesWritable();
     ImmutableBytesWritable value = new ImmutableBytesWritable();
-    
+
     protected MapFile.Reader reader;
-    
+
     public ReadBenchmark(Configuration conf, FileSystem fs, Path mf,
         int totalRows) {
       super(conf, fs, mf, totalRows);
     }
-    
+
     @Override
     void setUp() throws Exception {
       reader = new MapFile.Reader(fs, mf.toString(), conf);
     }
-    
+
     @Override
     void tearDown() throws Exception {
       reader.close();
     }
-    
+
   }
 
   static class SequentialReadBenchmark extends ReadBenchmark {
     ImmutableBytesWritable verify = new ImmutableBytesWritable();
-    
+
     public SequentialReadBenchmark(Configuration conf, FileSystem fs,
         Path mf, int totalRows) {
       super(conf, fs, mf, totalRows);
@@ -255,16 +255,16 @@ public class MapFilePerformanceEvaluatio
         format(i, this.verify).get());
       PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, value.getSize());
     }
-    
+
     @Override
     protected int getReportingPeriod() {
       return this.totalRows; // don't report progress
     }
-    
+
   }
-  
+
   static class UniformRandomReadBenchmark extends ReadBenchmark {
-    
+
     private Random random = new Random();
 
     public UniformRandomReadBenchmark(Configuration conf, FileSystem fs,
@@ -278,13 +278,13 @@ public class MapFilePerformanceEvaluatio
       ImmutableBytesWritable r = (ImmutableBytesWritable)reader.get(k, value);
       PerformanceEvaluationCommons.assertValueSize(r.getSize(), ROW_LENGTH);
     }
-    
+
     private ImmutableBytesWritable getRandomRow() {
       return format(random.nextInt(totalRows), key);
     }
-    
+
   }
-  
+
   static class UniformRandomSmallScan extends ReadBenchmark {
     private Random random = new Random();
 
@@ -308,7 +308,7 @@ public class MapFilePerformanceEvaluatio
         PerformanceEvaluationCommons.assertValueSize(this.value.getSize(), ROW_LENGTH);
       }
     }
-    
+
     private ImmutableBytesWritable getRandomRow() {
       return format(random.nextInt(totalRows), key);
     }
@@ -328,19 +328,19 @@ public class MapFilePerformanceEvaluatio
       ImmutableBytesWritable r = (ImmutableBytesWritable)reader.get(k, value);
       PerformanceEvaluationCommons.assertValueSize(r.getSize(), ROW_LENGTH);
     }
-    
+
     private ImmutableBytesWritable getGaussianRandomRow() {
       int r = (int) randomData.nextGaussian((double)totalRows / 2.0,
           (double)totalRows / 10.0);
       return format(r, key);
     }
-    
+
   }
 
   /**
    * @param args
-   * @throws Exception 
-   * @throws IOException 
+   * @throws Exception
+   * @throws IOException
    */
   public static void main(String[] args) throws Exception {
     new MapFilePerformanceEvaluation(new HBaseConfiguration()).
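
Both evaluation classes share the RowOrientedBenchmark template above:
runBenchmark times a full pass of setUp, doRow per row, and tearDown. A
minimal sketch of adding a benchmark (the class name is illustrative):

    static class NoOpReadBenchmark extends ReadBenchmark {
      public NoOpReadBenchmark(Configuration conf, FileSystem fs, Path mf,
          int totalRows) {
        super(conf, fs, mf, totalRows);
      }

      @Override
      void doRow(int i) throws Exception {
        // Per-row work under test goes here; ReadBenchmark's setUp and
        // tearDown already open and close the MapFile.Reader.
      }
    }

It would then run as runBenchmark(new NoOpReadBenchmark(conf, fs, mf,
ROW_COUNT), ROW_COUNT).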

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java Fri May  7 19:26:45 2010
@@ -49,12 +49,12 @@ public class MiniHBaseCluster implements
   public LocalHBaseCluster hbaseCluster;
 
   /**
-   * Start a MiniHBaseCluster. 
+   * Start a MiniHBaseCluster.
    * @param conf Configuration to be used for cluster
    * @param numRegionServers initial number of region servers to start.
    * @throws IOException
    */
-  public MiniHBaseCluster(Configuration conf, int numRegionServers) 
+  public MiniHBaseCluster(Configuration conf, int numRegionServers)
   throws IOException {
     this.conf = conf;
     init(numRegionServers);
@@ -239,7 +239,7 @@ public class MiniHBaseCluster implements
 
   /**
    * Shut down the mini HBase cluster
-   * @throws IOException 
+   * @throws IOException
    */
   public void shutdown() throws IOException {
     if (this.hbaseCluster != null) {
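
A usage sketch of the constructor and shutdown shown above, assuming conf
is an already-prepared Configuration:

    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 2);  // two region servers
    try {
      // exercise the cluster
    } finally {
      cluster.shutdown();  // declared to throw IOException
    }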

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java Fri May  7 19:26:45 2010
@@ -55,7 +55,7 @@ public class MultiRegionTable extends HB
     Bytes.toBytes("xxx"),
     Bytes.toBytes("yyy")
   };
-  
+
   protected final byte [] columnFamily;
   protected HTableDescriptor desc;
 
@@ -68,13 +68,13 @@ public class MultiRegionTable extends HB
 
   public MultiRegionTable(int nServers, final String familyName) {
     super(nServers);
-    
+
      this.columnFamily = Bytes.toBytes(familyName);
     // These are needed for the new and improved Map/Reduce framework
     System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir"));
     conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
   }
-  
+
   /**
    * Run after dfs is ready but before hbase cluster is started up.
    */
@@ -96,13 +96,13 @@ public class MultiRegionTable extends HB
       for(int i = 0; i < regions.length; i++) {
         HRegion.addRegionToMETA(meta, regions[i]);
       }
-      
+
       closeRootAndMeta();
     } catch (Exception e) {
       shutdownDfs(dfsCluster);
       throw e;
     }
-  } 
+  }
 
   private HRegion createARegion(byte [] startKey, byte [] endKey) throws IOException {
     HRegion region = createNewHRegion(desc, startKey, endKey);
@@ -110,7 +110,7 @@ public class MultiRegionTable extends HB
     closeRegionAndDeleteLog(region);
     return region;
   }
-  
+
   private void closeRegionAndDeleteLog(HRegion region) throws IOException {
     region.close();
     region.getLog().closeAndDelete();
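
For context, a test picks up this pre-split table by subclassing; a minimal
sketch where the class and family names are illustrative:

    public class MyMapReduceTest extends MultiRegionTable {
      public MyMapReduceTest() {
        super(1, "contents");  // one region server; family for the KEYS regions
      }
    }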

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java Fri May  7 19:26:45 2010
@@ -84,22 +84,22 @@ import org.apache.hadoop.util.LineReader
  * command-line which test to run and how many clients are participating in
  * this experiment. Run <code>java PerformanceEvaluation --help</code> to
  * obtain usage.
- * 
+ *
  * <p>This class sets up and runs the evaluation programs described in
  * Section 7, <i>Performance Evaluation</i>, of the <a
  * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
  * paper, pages 8-10.
- * 
+ *
  * <p>If number of clients > 1, we start up a MapReduce job. Each map task
  * runs an individual client. Each client does about 1GB of data.
  */
 public class PerformanceEvaluation implements HConstants {
   protected static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName());
-  
+
   private static final int ROW_LENGTH = 1000;
   private static final int ONE_GB = 1024 * 1024 * 1000;
   private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
-  
+
   public static final byte [] TABLE_NAME = Bytes.toBytes("TestTable");
   public static final byte [] FAMILY_NAME = Bytes.toBytes("info");
   public static final byte [] QUALIFIER_NAME = Bytes.toBytes("data");
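
Spelling out the constants above: ONE_GB = 1024 * 1024 * 1000 = 1,048,576,000
bytes, so ROWS_PER_GB = 1,048,576,000 / 1000 = 1,048,576. The "about 1GB of
data" per client in the class comment is therefore roughly a million
1000-byte rows.
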
@@ -111,7 +111,7 @@ public class PerformanceEvaluation imple
   }
 
   protected Map<String, CmdDescriptor> commands = new TreeMap<String, CmdDescriptor>();
-  
+
   volatile Configuration conf;
   private boolean miniCluster = false;
   private boolean nomapred = false;
@@ -127,9 +127,9 @@ public class PerformanceEvaluation imple
   public static final Pattern LINE_PATTERN =
     Pattern.compile("startRow=(\\d+),\\s+" +
         "perClientRunRows=(\\d+),\\s+" +
-        "totalRows=(\\d+),\\s+" + 
-        "clients=(\\d+),\\s+" + 
-        "flushCommits=(\\w+),\\s+" + 
+        "totalRows=(\\d+),\\s+" +
+        "clients=(\\d+),\\s+" +
+        "flushCommits=(\\w+),\\s+" +
         "writeToWAL=(\\w+)");
 
   /**
@@ -141,8 +141,8 @@ public class PerformanceEvaluation imple
     ELAPSED_TIME,
     /** number of rows */
     ROWS}
-  
-  
+
+
   /**
    * Constructor
    * @param c Configuration object
@@ -174,13 +174,13 @@ public class PerformanceEvaluation imple
         "Run scan test using a filter to find a specific row based on it's value (make sure to use --rows=20)");
   }
 
-  protected void addCommandDescriptor(Class<? extends Test> cmdClass, 
+  protected void addCommandDescriptor(Class<? extends Test> cmdClass,
       String name, String description) {
-    CmdDescriptor cmdDescriptor = 
+    CmdDescriptor cmdDescriptor =
       new CmdDescriptor(cmdClass, name, description);
     commands.put(name, cmdDescriptor);
   }
-  
+
   /**
    * Implementations can have their status set.
    */
@@ -192,11 +192,11 @@ public class PerformanceEvaluation imple
      */
     void setStatus(final String msg) throws IOException;
   }
-  
+
   /**
    *  This class works as the InputSplit of Performance Evaluation
-   *  MapReduce InputFormat, and the Record Value of RecordReader. 
-   *  Each map task will only read one record from a PeInputSplit, 
+   *  MapReduce InputFormat and as the record value of its RecordReader.
+   *  Each map task reads only one record from a PeInputSplit;
    *  the record value is the PeInputSplit itself.
    */
   public static class PeInputSplit extends InputSplit implements Writable {
@@ -206,7 +206,7 @@ public class PerformanceEvaluation imple
     private int clients = 0;
     private boolean flushCommits = false;
     private boolean writeToWAL = true;
-      
+
     public PeInputSplit() {
       this.startRow = 0;
       this.rows = 0;
@@ -215,7 +215,7 @@ public class PerformanceEvaluation imple
       this.flushCommits = false;
       this.writeToWAL = true;
     }
-    
+
     public PeInputSplit(int startRow, int rows, int totalRows, int clients,
         boolean flushCommits, boolean writeToWAL) {
       this.startRow = startRow;
@@ -225,13 +225,13 @@ public class PerformanceEvaluation imple
       this.flushCommits = flushCommits;
       this.writeToWAL = writeToWAL;
     }
-    
+
     @Override
     public void readFields(DataInput in) throws IOException {
       this.startRow = in.readInt();
       this.rows = in.readInt();
       this.totalRows = in.readInt();
-      this.clients = in.readInt();      
+      this.clients = in.readInt();
       this.flushCommits = in.readBoolean();
       this.writeToWAL = in.readBoolean();
     }
@@ -245,29 +245,29 @@ public class PerformanceEvaluation imple
       out.writeBoolean(flushCommits);
       out.writeBoolean(writeToWAL);
     }
-    
+
     @Override
     public long getLength() throws IOException, InterruptedException {
       return 0;
     }
-  
+
     @Override
     public String[] getLocations() throws IOException, InterruptedException {
       return new String[0];
     }
-    
+
     public int getStartRow() {
       return startRow;
     }
-    
+
     public int getRows() {
       return rows;
     }
-    
+
     public int getTotalRows() {
       return totalRows;
     }
-    
+
     public int getClients() {
       return clients;
     }
@@ -280,10 +280,10 @@ public class PerformanceEvaluation imple
       return writeToWAL;
     }
   }
-  
+
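
Because PeInputSplit implements Writable, the framework ships it between
processes through the write()/readFields() pair above. A round-trip sketch
with arbitrary field values, using plain java.io streams (assumed to run in a
context that can throw IOException):

    PerformanceEvaluation.PeInputSplit sent =
        new PerformanceEvaluation.PeInputSplit(0, 1000, 1000, 1, false, true);
    java.io.ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
    sent.write(new java.io.DataOutputStream(buf));            // serialize

    PerformanceEvaluation.PeInputSplit received =
        new PerformanceEvaluation.PeInputSplit();
    received.readFields(new java.io.DataInputStream(          // deserialize
        new java.io.ByteArrayInputStream(buf.toByteArray())));
    // received.getRows() == 1000 and received.isWriteToWAL() == true
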
   /**
    *  InputFormat of Performance Evaluation MapReduce job.
-   *  It extends from FileInputFormat, want to use it's methods such as setInputPaths(). 
+   *  It extends FileInputFormat in order to use its methods, such as setInputPaths().
    */
   public static class PeInputFormat extends FileInputFormat<NullWritable, PeInputSplit> {
 
@@ -291,7 +291,7 @@ public class PerformanceEvaluation imple
     public List<InputSplit> getSplits(JobContext job) throws IOException {
       // generate splits
       List<InputSplit> splitList = new ArrayList<InputSplit>();
-      
+
       for (FileStatus file: listStatus(job)) {
         Path path = file.getPath();
         FileSystem fs = path.getFileSystem(job.getConfiguration());
@@ -313,7 +313,7 @@ public class PerformanceEvaluation imple
             boolean flushCommits = Boolean.parseBoolean(m.group(5));
             boolean writeToWAL = Boolean.parseBoolean(m.group(6));
 
-            LOG.debug("split["+ splitList.size() + "] " + 
+            LOG.debug("split["+ splitList.size() + "] " +
                      " startRow=" + startRow +
                      " rows=" + rows +
                      " totalRows=" + totalRows +
@@ -322,60 +322,60 @@ public class PerformanceEvaluation imple
                      " writeToWAL=" + writeToWAL);
 
             PeInputSplit newSplit =
-              new PeInputSplit(startRow, rows, totalRows, clients, 
+              new PeInputSplit(startRow, rows, totalRows, clients,
                 flushCommits, writeToWAL);
             splitList.add(newSplit);
           }
         }
         in.close();
       }
-      
+
       LOG.info("Total # of splits: " + splitList.size());
       return splitList;
     }
-    
+
     @Override
     public RecordReader<NullWritable, PeInputSplit> createRecordReader(InputSplit split,
                             TaskAttemptContext context) {
       return new PeRecordReader();
     }
-    
+
     public static class PeRecordReader extends RecordReader<NullWritable, PeInputSplit> {
       private boolean readOver = false;
       private PeInputSplit split = null;
       private NullWritable key = null;
       private PeInputSplit value = null;
-      
+
       @Override
-      public void initialize(InputSplit split, TaskAttemptContext context) 
+      public void initialize(InputSplit split, TaskAttemptContext context)
                   throws IOException, InterruptedException {
         this.readOver = false;
         this.split = (PeInputSplit)split;
       }
-      
+
       @Override
       public boolean nextKeyValue() throws IOException, InterruptedException {
         if(readOver) {
           return false;
         }
-        
+
         key = NullWritable.get();
         value = (PeInputSplit)split;
-        
+
         readOver = true;
         return true;
       }
-      
+
       @Override
       public NullWritable getCurrentKey() throws IOException, InterruptedException {
         return key;
       }
-      
+
       @Override
       public PeInputSplit getCurrentValue() throws IOException, InterruptedException {
         return value;
       }
-      
+
       @Override
       public float getProgress() throws IOException, InterruptedException {
         if(readOver) {
@@ -384,18 +384,18 @@ public class PerformanceEvaluation imple
           return 0.0f;
         }
       }
-      
+
       @Override
       public void close() throws IOException {
         // do nothing
       }
     }
   }
-  
+
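
PeRecordReader above hands each map task exactly one key/value pair: the
first nextKeyValue() call returns true and exposes the split itself as the
value, and every later call returns false. Driven by hand for illustration
(the framework normally does this; passing a null context is harmless here
only because this reader's initialize() ignores it):

    PerformanceEvaluation.PeInputFormat.PeRecordReader rr =
        new PerformanceEvaluation.PeInputFormat.PeRecordReader();
    rr.initialize(
        new PerformanceEvaluation.PeInputSplit(0, 1000, 1000, 1, false, true),
        null);                                 // context unused by this reader
    while (rr.nextKeyValue()) {                // true exactly once
      PerformanceEvaluation.PeInputSplit v = rr.getCurrentValue();
      // use v.getStartRow(), v.getRows(), ...
    }
    rr.close();
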
   /**
    * MapReduce job that runs a performance evaluation client in each map task.
    */
-  public static class EvaluationMapTask 
+  public static class EvaluationMapTask
       extends Mapper<NullWritable, PeInputSplit, LongWritable, LongWritable> {
 
     /** configuration parameter name that contains the command */
@@ -432,18 +432,18 @@ public class PerformanceEvaluation imple
       return clazz;
     }
 
-    protected void map(NullWritable key, PeInputSplit value, final Context context) 
+    protected void map(NullWritable key, PeInputSplit value, final Context context)
            throws IOException, InterruptedException {
-      
+
       Status status = new Status() {
         public void setStatus(String msg) {
-           context.setStatus(msg); 
+           context.setStatus(msg);
         }
       };
-      
+
       // Evaluation task
       long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(),
-                                  value.getRows(), value.getTotalRows(), 
+                                  value.getRows(), value.getTotalRows(),
                                   value.isFlushCommits(), value.isWriteToWAL(),
                                   status);
       // Collect how much time the thing took. Report as map output and
@@ -454,7 +454,7 @@ public class PerformanceEvaluation imple
       context.progress();
     }
   }
-  
+
   /*
    * If table does not already exist, create.
   * @param c Client to use for checking.
@@ -490,7 +490,7 @@ public class PerformanceEvaluation imple
       doMapReduce(cmd);
     }
   }
-  
+
   /*
    * Run all clients in this vm each to its own thread.
    * @param cmd Command to run.
@@ -536,7 +536,7 @@ public class PerformanceEvaluation imple
       }
     }
   }
-  
+
   /*
    * Run a mapreduce job.  Run as many maps as asked-for clients.
    * Before we start up the job, write out an input file with instruction
@@ -552,24 +552,24 @@ public class PerformanceEvaluation imple
     Job job = new Job(this.conf);
     job.setJarByClass(PerformanceEvaluation.class);
     job.setJobName("HBase Performance Evaluation");
-    
+
     job.setInputFormatClass(PeInputFormat.class);
     PeInputFormat.setInputPaths(job, inputDir);
-    
+
     job.setOutputKeyClass(LongWritable.class);
     job.setOutputValueClass(LongWritable.class);
-    
+
     job.setMapperClass(EvaluationMapTask.class);
     job.setReducerClass(LongSumReducer.class);
-        
+
     job.setNumReduceTasks(1);
-    
+
     job.setOutputFormatClass(TextOutputFormat.class);
     TextOutputFormat.setOutputPath(job, new Path(inputDir,"outputs"));
-    
+
     job.waitForCompletion(true);
   }
-  
+
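
The wiring above means: one PeInputSplit per input-file line, one map per
split, and a single LongSumReducer reduce task, so the job's output, which
TextOutputFormat writes to an "outputs" subdirectory of the input directory,
is the per-key sum of the map outputs. As a plain-Java stand-in for what that
reducer does with values sharing a key:

    long[] mapOutputs = {12000L, 13250L, 11800L};  // illustrative elapsed ms
    long sum = 0L;
    for (long v : mapOutputs) {
      sum += v;  // LongSumReducer emits this total for the shared key
    }
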
   /*
    * Write input file of offsets-per-client for the mapreduce job.
    * @param c Configuration
@@ -694,7 +694,7 @@ public class PerformanceEvaluation imple
    */
   static abstract class Test {
     // Below makes it so that when Tests are all running in the one
-    // jvm, that they each have a differently seeded Random. 
+    // JVM, they each have a differently seeded Random.
     private static final Random randomSeed =
       new Random(System.currentTimeMillis());
     private static long nextRandomSeed() {
@@ -729,16 +729,16 @@ public class PerformanceEvaluation imple
       this.flushCommits = options.isFlushCommits();
       this.writeToWAL = options.isWriteToWAL();
     }
-    
+
     private String generateStatus(final int sr, final int i, final int lr) {
       return sr + "/" + i + "/" + lr;
     }
-    
+
     protected int getReportingPeriod() {
       int period = this.perClientRunRows / 10;
       return period == 0? this.perClientRunRows: period;
     }
-    
+
     void testSetup() throws IOException {
       this.admin = new HBaseAdmin(conf);
       this.table = new HTable(conf, tableName);
@@ -752,7 +752,7 @@ public class PerformanceEvaluation imple
         this.table.flushCommits();
       }
     }
-    
+
     /*
      * Run test
      * @return Elapsed time.
@@ -811,7 +811,7 @@ public class PerformanceEvaluation imple
       }
       s.close();
     }
- 
+
     @Override
     protected int getReportingPeriod() {
       int period = this.perClientRunRows / 100;
@@ -924,12 +924,12 @@ public class PerformanceEvaluation imple
     }
 
   }
-  
+
   static class RandomWriteTest extends Test {
     RandomWriteTest(Configuration conf, TestOptions options, Status status) {
       super(conf, options, status);
     }
-    
+
     @Override
     void testRow(final int i) throws IOException {
       byte [] row = getRandomRow(this.rand, this.totalRows);
@@ -940,19 +940,19 @@ public class PerformanceEvaluation imple
       table.put(put);
     }
   }
-  
+
   static class ScanTest extends Test {
     private ResultScanner testScanner;
 
     ScanTest(Configuration conf, TestOptions options, Status status) {
       super(conf, options, status);
     }
-    
+
     @Override
     void testSetup() throws IOException {
       super.testSetup();
     }
-    
+
     @Override
     void testTakedown() throws IOException {
       if (this.testScanner != null) {
@@ -960,8 +960,8 @@ public class PerformanceEvaluation imple
       }
       super.testTakedown();
     }
-    
-    
+
+
     @Override
     void testRow(final int i) throws IOException {
       if (this.testScanner == null) {
@@ -973,12 +973,12 @@ public class PerformanceEvaluation imple
     }
 
   }
-  
+
   static class SequentialReadTest extends Test {
     SequentialReadTest(Configuration conf, TestOptions options, Status status) {
       super(conf, options, status);
     }
-    
+
     @Override
     void testRow(final int i) throws IOException {
       Get get = new Get(format(i));
@@ -987,12 +987,12 @@ public class PerformanceEvaluation imple
     }
 
   }
-  
+
   static class SequentialWriteTest extends Test {
     SequentialWriteTest(Configuration conf, TestOptions options, Status status) {
       super(conf, options, status);
     }
-    
+
     @Override
     void testRow(final int i) throws IOException {
       Put put = new Put(format(i));
@@ -1036,7 +1036,7 @@ public class PerformanceEvaluation imple
       return scan;
     }
   }
-  
+
   /*
    * Format passed integer.
    * @param number
@@ -1052,7 +1052,7 @@ public class PerformanceEvaluation imple
     }
     return b;
   }
-  
+
   /*
   * This method takes some time and is done inline while uploading data.  For
    * example, doing the mapfile test, generation of the key and value
@@ -1064,14 +1064,14 @@ public class PerformanceEvaluation imple
     r.nextBytes(b);
     return b;
   }
-  
+
   static byte [] getRandomRow(final Random random, final int totalRows) {
     return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
   }
-  
+
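
A side note on getRandomRow(): nextInt(Integer.MAX_VALUE) % totalRows is
slightly modulo-biased whenever totalRows does not evenly divide
Integer.MAX_VALUE, while Random.nextInt(int) draws uniformly. An equivalent,
bias-free form, shown as it would look inside this class (an observation, not
a change made by this commit):

    static byte [] getRandomRowUnbiased(final Random random,
        final int totalRows) {
      // format() is this class's row-key formatter; uniform over [0, totalRows)
      return format(random.nextInt(totalRows));
    }
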
   long runOneClient(final Class<? extends Test> cmd, final int startRow,
-                    final int perClientRunRows, final int totalRows, 
-                    boolean flushCommits, boolean writeToWAL, 
+                    final int perClientRunRows, final int totalRows,
+                    boolean flushCommits, boolean writeToWAL,
                     final Status status)
   throws IOException {
     status.setStatus("Start " + cmd + " at offset " + startRow + " for " +
@@ -1099,7 +1099,7 @@ public class PerformanceEvaluation imple
       "ms at offset " + startRow + " for " + perClientRunRows + " rows");
     return totalElapsedTime;
   }
-  
+
   private void runNIsOne(final Class<? extends Test> cmd) {
     Status status = new Status() {
       public void setStatus(String msg) throws IOException {
@@ -1115,7 +1115,7 @@ public class PerformanceEvaluation imple
         status);
     } catch (Exception e) {
       LOG.error("Failed", e);
-    } 
+    }
   }
 
   private void runTest(final Class<? extends Test> cmd) throws IOException,
@@ -1127,7 +1127,7 @@ public class PerformanceEvaluation imple
       dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
       zooKeeperCluster = new MiniZooKeeperCluster();
       int zooKeeperPort = zooKeeperCluster.startup(new File(System.getProperty("java.io.tmpdir")));
-      
+
       // mangle the conf so that the fs parameter points to the minidfs we
       // just started up
       FileSystem fs = dfsCluster.getFileSystem();
@@ -1139,14 +1139,14 @@ public class PerformanceEvaluation imple
       FSUtils.setVersion(fs, parentdir);
       hbaseMiniCluster = new MiniHBaseCluster(this.conf, N);
     }
-    
+
     try {
       if (N == 1) {
         // If there is only one client and one HRegionServer, we assume nothing
         // has been set up at all.
         runNIsOne(cmd);
       } else {
-        // Else, run 
+        // Else, run the multi-client path (MapReduce unless --nomapred).
         runNIsMoreThanOne(cmd);
       }
     } finally {
@@ -1157,11 +1157,11 @@ public class PerformanceEvaluation imple
       }
     }
   }
-  
+
   protected void printUsage() {
     printUsage(null);
   }
-  
+
   protected void printUsage(final String message) {
     if (message != null && message.length() > 0) {
       System.err.println(message);
@@ -1203,16 +1203,16 @@ public class PerformanceEvaluation imple
     // Set total number of rows to write.
     this.R = this.R * N;
   }
-  
+
   public int doCommandLine(final String[] args) {
     // Process command-line args. TODO: Better cmd-line processing
-    // (but hopefully something not as painful as cli options).    
+    // (but hopefully something not as painful as cli options).
     int errCode = -1;
     if (args.length < 1) {
       printUsage();
       return errCode;
     }
-    
+
     try {
       for (int i = 0; i < args.length; i++) {
         String cmd = args[i];
@@ -1221,19 +1221,19 @@ public class PerformanceEvaluation imple
           errCode = 0;
           break;
         }
-       
+
         final String miniClusterArgKey = "--miniCluster";
         if (cmd.startsWith(miniClusterArgKey)) {
           this.miniCluster = true;
           continue;
         }
-        
+
         final String nmr = "--nomapred";
         if (cmd.startsWith(nmr)) {
           this.nomapred = true;
           continue;
         }
-        
+
         final String rows = "--rows=";
         if (cmd.startsWith(rows)) {
           this.R = Integer.parseInt(cmd.substring(rows.length()));
@@ -1259,14 +1259,14 @@ public class PerformanceEvaluation imple
           errCode = 0;
           break;
         }
-    
+
         printUsage();
         break;
       }
     } catch (Exception e) {
       e.printStackTrace();
     }
-    
+
     return errCode;
   }
 

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java Fri May  7 19:26:45 2010
@@ -33,13 +33,13 @@ import org.apache.commons.logging.LogFac
 public class PerformanceEvaluationCommons {
   static final Log LOG =
     LogFactory.getLog(PerformanceEvaluationCommons.class.getName());
-  
+
   public static void assertValueSize(final int expectedSize, final int got) {
     if (got != expectedSize) {
       throw new AssertionError("Expected " + expectedSize + " but got " + got);
     }
   }
-  
+
   public static void assertKey(final byte [] expected, final ByteBuffer got) {
     byte [] b = new byte[got.limit()];
     got.get(b, 0, got.limit());
@@ -53,7 +53,7 @@ public class PerformanceEvaluationCommon
         " but got " + org.apache.hadoop.hbase.util.Bytes.toString(got));
     }
   }
-  
+
   public static void concurrentReads(final Runnable r) {
     final int count = 1;
     long now = System.currentTimeMillis();

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestCompare.java Fri May  7 19:26:45 2010
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.util.Byte
  * Test comparing HBase objects.
  */
 public class TestCompare extends TestCase {
-    
+
   /**
    * Sort of HRegionInfo.
    */

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestHMsg.java Fri May  7 19:26:45 2010
@@ -54,7 +54,7 @@ public class TestHMsg extends TestCase {
      new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), b, b));
     assertNotSame(-1, msgs.indexOf(hmsg));
   }
-  
+
   public void testSerialization() throws IOException {
     // Check out new HMsg that carries two daughter split regions.
     byte [] abytes = Bytes.toBytes("a");

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java Fri May  7 19:26:45 2010
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.client.HT
  */
 public class TestInfoServers extends HBaseClusterTestCase {
   static final Log LOG = LogFactory.getLog(TestInfoServers.class);
-  
+
   @Override
   protected void preHBaseClusterSetup() {
     // Bring up info servers on 'odd' port numbers in case the test is not
@@ -41,7 +41,7 @@ public class TestInfoServers extends HBa
     conf.setInt("hbase.master.info.port", 60011);
     conf.setInt("hbase.regionserver.info.port", 60031);
   }
-  
+
   /**
    * @throws Exception
    */
@@ -56,7 +56,7 @@ public class TestInfoServers extends HBa
     assertHasExpectedContent(new URL("http://localhost:" + port +
       "/index.html"), "regionserver");
   }
-  
+
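
The helper below only needs to fetch the page and scan it for a marker
string; most of its body falls outside this hunk, so here is a hypothetical
stand-in using plain java.net and java.io (u and expected are its
parameters):

    java.io.InputStream in = u.openStream();
    StringBuilder sb = new StringBuilder();
    for (int c; (c = in.read()) != -1;) {
      sb.append((char) c);  // info pages are small; byte-wise read is fine
    }
    in.close();
    assertTrue("no '" + expected + "' in " + u, sb.indexOf(expected) >= 0);
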
   private void assertHasExpectedContent(final URL u, final String expected)
   throws IOException {
     LOG.info("Testing " + u.toString() + " has " + expected);

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java Fri May  7 19:26:45 2010
@@ -40,7 +40,7 @@ public class TestKeyValue extends TestCa
     byte [] qualifier1 = Bytes.toBytes("def");
     byte [] family2 = Bytes.toBytes("abcd");
     byte [] qualifier2 = Bytes.toBytes("ef");
-    
+
     KeyValue aaa = new KeyValue(a, family1, qualifier1, 0L, Type.Put, a);
     assertFalse(aaa.matchingColumn(family2, qualifier2));
     assertTrue(aaa.matchingColumn(family1, qualifier1));
@@ -62,7 +62,7 @@ public class TestKeyValue extends TestCa
     check(Bytes.toBytes(getName()), Bytes.toBytes(getName()), null, 1, null);
     check(HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes(getName()), null, 1, null);
   }
-  
+
   private void check(final byte [] row, final byte [] family, byte [] qualifier,
     final long timestamp, final byte [] value) {
     KeyValue kv = new KeyValue(row, family, qualifier, timestamp, value);
@@ -71,7 +71,7 @@ public class TestKeyValue extends TestCa
     // Call toString to make sure it works.
     LOG.info(kv.toString());
   }
-  
+
   public void testPlainCompare() throws Exception {
     final byte [] a = Bytes.toBytes("aaa");
     final byte [] b = Bytes.toBytes("bbb");
@@ -118,11 +118,11 @@ public class TestKeyValue extends TestCa
     KVComparator c = new KeyValue.RootComparator();
     assertTrue(c.compare(b, a) < 0);
     KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now);
-    KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), 
+    KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"),
         Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1235943454602L,
         (byte[])null);
     assertTrue(c.compare(aa, bb) < 0);
-    
+
     // Meta compares
     KeyValue aaa = new KeyValue(
         Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now);
@@ -130,12 +130,12 @@ public class TestKeyValue extends TestCa
         Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now);
     c = new KeyValue.MetaComparator();
     assertTrue(c.compare(bbb, aaa) < 0);
-    
+
     KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"),
         Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236024396271L,
         (byte[])null);
     assertTrue(c.compare(aaaa, bbb) < 0);
-    
+
     KeyValue x = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"),
         Bytes.toBytes("info"), Bytes.toBytes(""), 9223372036854775807L,
         (byte[])null);
@@ -152,7 +152,7 @@ public class TestKeyValue extends TestCa
   /**
   * Tests cases where row keys have characters below the ','.
    * See HBASE-832
-   * @throws IOException 
+   * @throws IOException
    */
   public void testKeyValueBorderCases() throws IOException {
     // % sorts before , so if we don't do special comparator, rowB would
@@ -163,15 +163,15 @@ public class TestKeyValue extends TestCa
         Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
     assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0);
 
-    rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"), 
+    rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"),
         Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
-    rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"), 
+    rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"),
         Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
     assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0);
 
-    rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"), 
+    rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"),
         Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
-    rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"), 
+    rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"),
         Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
     assertTrue(KeyValue.ROOT_COMPARATOR.compare(rowA, rowB) < 0);
   }
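
The border cases above hinge on raw byte order disagreeing with meta order:
'$' (0x24) and '%' (0x25) sort before ',' (0x2C) as bytes, so a plain
lexicographic comparison reverses what the meta and root comparators assert.
A sketch of the raw comparison for the second case:

    byte [] a = Bytes.toBytes("testtable,,1234");
    byte [] b = Bytes.toBytes("testtable,$www.hbase.org/,99999");
    int raw = Bytes.compareTo(a, b);  // > 0: raw order puts b before a,
                                      // while META_COMPARATOR keeps a first
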
@@ -275,6 +275,6 @@ public class TestKeyValue extends TestCa
     // Test multiple KeyValues in a single blob.
 
     // TODO actually write this test!
-    
+
   }
 }

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestMergeMeta.java Fri May  7 19:26:45 2010
@@ -24,7 +24,7 @@ import java.io.IOException;
 /** Tests region merging */
 public class TestMergeMeta extends AbstractMergeTestBase {
 
-  /** constructor 
+  /** constructor
    * @throws Exception
    */
   public TestMergeMeta() throws Exception {
@@ -32,7 +32,7 @@ public class TestMergeMeta extends Abstr
     conf.setLong("hbase.client.pause", 1 * 1000);
     conf.setInt("hbase.client.retries.number", 2);
   }
-  
+
   /**
    * test case
    * @throws IOException

Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java?rev=942186&r1=942185&r2=942186&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java Fri May  7 19:26:45 2010
@@ -42,11 +42,11 @@ public class TestRegionRebalancing exten
   HTable table;
 
   HTableDescriptor desc;
-  
+
   final byte[] FIVE_HUNDRED_KBYTES;
-  
+
   final byte [] FAMILY_NAME = Bytes.toBytes("col");
-  
+
   /** constructor */
   public TestRegionRebalancing() {
     super(1);
@@ -54,11 +54,11 @@ public class TestRegionRebalancing exten
     for (int i = 0; i < 500 * 1024; i++) {
       FIVE_HUNDRED_KBYTES[i] = 'x';
     }
-    
+
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(FAMILY_NAME));
   }
-  
+
   /**
    * Before the hbase cluster starts up, create some dummy regions.
    */
@@ -72,32 +72,32 @@ public class TestRegionRebalancing exten
     }
     startKeys.add(null);
     LOG.info(startKeys.size() + " start keys generated");
-        
+
     List<HRegion> regions = new ArrayList<HRegion>();
     for (int i = 0; i < 20; i++) {
       regions.add(createAregion(startKeys.get(i), startKeys.get(i+1)));
     }
-    
+
     // Now create the root and meta regions and insert the data regions
     // created above into the meta
-    
+
     createRootAndMetaRegions();
     for (HRegion region : regions) {
       HRegion.addRegionToMETA(meta, region);
     }
     closeRootAndMeta();
   }
-  
+
   /**
    * For HBASE-71. Try a few different configurations of starting and stopping
   * region servers to see if the assignment of regions is pretty balanced.
-   * @throws IOException 
+   * @throws IOException
    */
   public void testRebalancing() throws IOException {
     table = new HTable(conf, "test");
-    assertEquals("Test table should have 20 regions", 
+    assertEquals("Test table should have 20 regions",
       20, table.getStartKeys().length);
-    
+
     // verify that the region assignments are balanced to start out
     assertRegionsAreBalanced();
 
@@ -105,32 +105,32 @@ public class TestRegionRebalancing exten
     // add a region server - total of 2
     cluster.startRegionServer();
     assertRegionsAreBalanced();
-  
+
     // add a region server - total of 3
-    LOG.debug("Adding 3rd region server.");    
+    LOG.debug("Adding 3rd region server.");
     cluster.startRegionServer();
     assertRegionsAreBalanced();
-    
+
     // kill a region server - total of 2
     LOG.debug("Killing the 3rd region server.");
     cluster.stopRegionServer(2, false);
     cluster.waitOnRegionServer(2);
     assertRegionsAreBalanced();
-    
+
     // start two more region servers - total of 4
     LOG.debug("Adding 3rd region server");
     cluster.startRegionServer();
-    LOG.debug("Adding 4th region server");    
-    cluster.startRegionServer();    
+    LOG.debug("Adding 4th region server");
+    cluster.startRegionServer();
     assertRegionsAreBalanced();
 
     for (int i = 0; i < 6; i++){
-      LOG.debug("Adding " + (i + 5) + "th region server");    
+      LOG.debug("Adding " + (i + 5) + "th region server");
       cluster.startRegionServer();
     }
     assertRegionsAreBalanced();
   }
-    
+
   /** figure out how many regions are currently being served. */
   private int getRegionCount() {
     int total = 0;
@@ -139,7 +139,7 @@ public class TestRegionRebalancing exten
     }
     return total;
   }
-  
+
   /**
    * Determine if regions are balanced. Figure out the total, divide by the
    * number of online servers, then test if each server is +/- 1 of average
@@ -160,39 +160,39 @@ public class TestRegionRebalancing exten
       double avg = cluster.getMaster().getAverageLoad();
       int avgLoadPlusSlop = (int)Math.ceil(avg * (1 + slop));
       int avgLoadMinusSlop = (int)Math.floor(avg * (1 - slop)) - 1;
-      LOG.debug("There are " + servers.size() + " servers and " + regionCount 
+      LOG.debug("There are " + servers.size() + " servers and " + regionCount
         + " regions. Load Average: " + avg + " low border: " + avgLoadMinusSlop
         + ", up border: " + avgLoadPlusSlop + "; attempt: " + i);
 
       for (HRegionServer server : servers) {
         int serverLoad = server.getOnlineRegions().size();
         LOG.debug(server.hashCode() + " Avg: " + avg + " actual: " + serverLoad);
-        if (!(avg > 2.0 && serverLoad <= avgLoadPlusSlop 
+        if (!(avg > 2.0 && serverLoad <= avgLoadPlusSlop
             && serverLoad >= avgLoadMinusSlop)) {
           LOG.debug(server.hashCode() + " Isn't balanced!!! Avg: " + avg +
               " actual: " + serverLoad + " slop: " + slop);
           success = false;
         }
       }
-      
+
       if (!success) {
-        // one or more servers are not balanced. sleep a little to give it a 
+        // one or more servers are not balanced. sleep a little to give it a
         // chance to catch up. then, go back to the retry loop.
         try {
           Thread.sleep(10000);
         } catch (InterruptedException e) {}
-        
+
         continue;
       }
-      
+
       // if we get here, all servers were balanced, so we should just return.
       return;
     }
-    // if we get here, we tried 5 times and never got to short circuit out of 
+    // if we get here, we tried 5 times and never got to short circuit out of
     // the retry loop, so this is a failure.
     fail("After 5 attempts, region assignments were not balanced.");
   }
-  
+
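
For intuition on the window this method computes: the test creates 20 user
regions plus root and meta, so with 4 servers the average load is 22/4 = 5.5.
Assuming a slop of 0.2 purely for illustration (the actual constant is
defined above this hunk):

    double avg = 22 / 4.0;                               // 5.5
    double slop = 0.2;                                   // assumed value
    int upper = (int) Math.ceil(avg * (1 + slop));       // ceil(6.6)  -> 7
    int lower = (int) Math.floor(avg * (1 - slop)) - 1;  // floor(4.4) - 1 -> 3
    // a server holding 3..7 regions counts as balanced in this pass

Note the check also requires avg > 2.0, so a cluster with very few regions
per server never counts as balanced here.
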
   private List<HRegionServer> getOnlineRegionServers() {
     List<HRegionServer> list = new ArrayList<HRegionServer>();
     for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) {
@@ -204,7 +204,7 @@ public class TestRegionRebalancing exten
   }
 
   /**
-   * Wait until all the regions are assigned. 
+   * Wait until all the regions are assigned.
    */
   private void waitForAllRegionsAssigned() {
     while (getRegionCount() < 22) {
@@ -218,9 +218,9 @@ public class TestRegionRebalancing exten
 
   /**
    * create a region with the specified start and end key and exactly one row
-   * inside. 
+   * inside.
    */
-  private HRegion createAregion(byte [] startKey, byte [] endKey) 
+  private HRegion createAregion(byte [] startKey, byte [] endKey)
   throws IOException {
     HRegion region = createNewHRegion(desc, startKey, endKey);
     byte [] keyToWrite = startKey == null ? Bytes.toBytes("row_000") : startKey;