Posted to commits@hbase.apache.org by st...@apache.org on 2009/02/25 06:34:30 UTC

svn commit: r747666 [3/3] - in /hadoop/hbase/trunk: ./ conf/ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regio...

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestEmptyMetaInfo.java Wed Feb 25 05:34:29 2009
@@ -22,7 +22,6 @@
 
 import java.io.IOException;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionRebalancing.java Wed Feb 25 05:34:29 2009
@@ -81,11 +81,9 @@
     // created above into the meta
     
     createRootAndMetaRegions();
-    
     for (HRegion region : regions) {
       HRegion.addRegionToMETA(meta, region);
     }
-    
     closeRootAndMeta();
   }
   
@@ -208,16 +206,11 @@
    */
   private HRegion createAregion(byte [] startKey, byte [] endKey) 
   throws IOException {
-    
     HRegion region = createNewHRegion(desc, startKey, endKey);
-    
     byte [] keyToWrite = startKey == null ? Bytes.toBytes("row_000") : startKey;
-    
     BatchUpdate bu = new BatchUpdate(keyToWrite);
     bu.put(COLUMN_NAME, "test".getBytes());
-
     region.batchUpdate(bu, null);
-
     region.close();
     region.getLog().closeAndDelete();
     return region;

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScanMultipleVersions.java Wed Feb 25 05:34:29 2009
@@ -24,6 +24,8 @@
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -88,15 +90,23 @@
   public void testScanMultipleVersions() throws Exception {
     // At this point we have created multiple regions and both HDFS and HBase
     // are running. There are 5 cases we have to test. Each is described below.
-
     HTable t = new HTable(conf, TABLE_NAME);
+    for (int i = 0; i < ROWS.length; i++) {
+      for (int j = 0; j < TIMESTAMPS.length; j++) {
+        Cell [] cells =
+          t.get(ROWS[i], HConstants.COLUMN_FAMILY, TIMESTAMPS[j], 1);
+        assertTrue(cells != null && cells.length == 1);
+        System.out.println("Row=" + Bytes.toString(ROWS[i]) + ", cell=" +
+          cells[0]);
+      }
+    }
     
     // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
-    
     int count = 0;
     Scanner s = t.getScanner(HConstants.COLUMN_FAMILY_ARRAY);
     try {
-      while (s.next() != null) {
+      for (RowResult rr = null; (rr = s.next()) != null;) {
+        System.out.println(rr.toString());
         count += 1;
       }
       assertEquals("Number of rows should be 2", 2, count);

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java Wed Feb 25 05:34:29 2009
@@ -86,14 +86,5 @@
     System.out.println("Regions after split (" + m.size() + "): " + m);
     // should have two regions now
     assertTrue(m.size() == 2);
-    // and "mmi" should be the midpoint
-    for (HRegionInfo hri: m.keySet()) {
-      byte[] start = hri.getStartKey();
-      byte[] end = hri.getEndKey();
-      if (Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY))
-        assertTrue(Bytes.equals(end, key_mmi));
-      if (Bytes.equals(end, key_mmi))
-        assertTrue(Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY));
-    }
   }
 }

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java Wed Feb 25 05:34:29 2009
@@ -189,7 +189,7 @@
     // that maps to a null value.
     // Testing row with columnKeys: a-e, e maps to null
     colvalues.put(new byte [] {LAST_CHAR}, 
-      new Cell(HLogEdit.deleteBytes.get(), HConstants.LATEST_TIMESTAMP));
+      new Cell(HLogEdit.DELETED_BYTES, HConstants.LATEST_TIMESTAMP));
     assertFalse("Failed with last columnKey " + LAST_CHAR + " mapping to null.", 
       filter.filterRow(colvalues));
   }
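
Note: HLogEdit's delete sentinel is now the plain byte [] constant
DELETED_BYTES instead of a wrapper that had to be unwrapped with get()
(the same rename shows up in TestHMemcache below). A delete-marker check
therefore reduces to a byte-array comparison; a sketch, with 'value'
hypothetical:

    byte [] value = HLogEdit.DELETED_BYTES;  // e.g. a cell value read back
    boolean isDeleteMarker = Bytes.equals(value, HLogEdit.DELETED_BYTES);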

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Wed Feb 25 05:34:29 2009
@@ -21,19 +21,16 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.io.MapFile;
-
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 
 /**
  * Test compactions
@@ -136,12 +133,12 @@
     // they were deleted.
     int count = 0;
     boolean containsStartRow = false;
-    for (MapFile.Reader reader: this.r.stores.
-        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getReaders()) {
-      reader.reset();
-      HStoreKey key = new HStoreKey();
-      ImmutableBytesWritable val = new ImmutableBytesWritable();
-      while(reader.next(key, val)) {
+    for (StoreFile f: this.r.stores.
+        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getStorefiles().values()) {
+      HFileScanner scanner = f.getReader().getScanner();
+      scanner.seekTo();
+      do {
+        HStoreKey key = HStoreKey.create(scanner.getKey());
         if (Bytes.equals(key.getRow(), STARTROW)) {
           containsStartRow = true;
           count++;
@@ -150,13 +147,13 @@
           // file.
           assertFalse(Bytes.equals(key.getRow(), secondRowBytes));
         }
-      }
+      } while(scanner.next());
     }
     assertTrue(containsStartRow);
     assertTrue(count == 3);
     // Do a simple TTL test.
     final int ttlInSeconds = 1;
-    for (HStore store: this.r.stores.values()) {
+    for (Store store: this.r.stores.values()) {
       store.ttl = ttlInSeconds * 1000;
     }
     Thread.sleep(ttlInSeconds * 1000);
@@ -167,14 +164,15 @@
   
   private int count() throws IOException {
     int count = 0;
-    for (MapFile.Reader reader: this.r.stores.
-        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getReaders()) {
-      reader.reset();
-      HStoreKey key = new HStoreKey();
-      ImmutableBytesWritable val = new ImmutableBytesWritable();
-      while(reader.next(key, val)) {
-        count++;
+    for (StoreFile f: this.r.stores.
+        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getStorefiles().values()) {
+      HFileScanner scanner = f.getReader().getScanner();
+      if (!scanner.seekTo()) {
+        continue;
       }
+      do {
+        count++;
+      } while(scanner.next());
     }
     return count;
   }
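
Note: the TestCompaction hunks are the heart of this patch: store reads
move from MapFile.Reader to the HFile-backed StoreFile/HFileScanner API,
and HStore becomes Store. A minimal sketch of the new iteration idiom,
assuming the same package-private access to HRegion.stores the test uses
and the imports added above; the wrapper method and its 'family'
parameter are hypothetical:

    // Print the row of every key in one column family's store files.
    private void dumpFamily(final HRegion r, final byte [] family)
    throws IOException {
      for (StoreFile f: r.stores.get(Bytes.mapKey(family)).
          getStorefiles().values()) {
        HFileScanner scanner = f.getReader().getScanner();
        if (!scanner.seekTo()) {
          continue;  // empty store file: nothing to position on
        }
        do {
          HStoreKey key = HStoreKey.create(scanner.getKey());
          System.out.println(Bytes.toString(key.getRow()));
        } while (scanner.next());
      }
    }

seekTo() positions the scanner on the first key and reports whether it
succeeded, so iteration becomes seekTo() plus a do/while on next(),
replacing the old reset()/while(next(key, val)) loop.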

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java Wed Feb 25 05:34:29 2009
@@ -61,6 +61,175 @@
       this.miniHdfs.getFileSystem().getHomeDirectory().toString());
   }
 
+
+  public void testGetFullMultiMapfile() throws IOException {
+    HRegion region = null;
+    BatchUpdate batchUpdate = null;
+    Map<byte [], Cell> results = null;
+    
+    try {
+      HTableDescriptor htd = createTableDescriptor(getName());
+      region = createNewHRegion(htd, null, null);
+
+      // Test ordering issue
+      //
+      byte [] row = Bytes.toBytes("row1");
+     
+      // write some data
+      batchUpdate = new BatchUpdate(row);
+      batchUpdate.put(COLUMNS[0], "olderValue".getBytes());
+      region.batchUpdate(batchUpdate, null);
+
+      // flush
+      region.flushcache();
+
+      // assert that getFull gives us the older value
+      results = region.getFull(row, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
+      assertEquals("olderValue", new String(results.get(COLUMNS[0]).getValue()));
+      
+      // write a new value for the cell
+      batchUpdate = new BatchUpdate(row);
+      batchUpdate.put(COLUMNS[0], "newerValue".getBytes());
+      region.batchUpdate(batchUpdate, null);
+
+      // flush
+      region.flushcache();
+      
+      // assert that getFull gives us the later value
+      results = region.getFull(row, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
+      assertEquals("newerValue", new String(results.get(COLUMNS[0]).getValue()));
+     
+      //
+      // Test the delete masking issue
+      //
+      byte [] row2 = Bytes.toBytes("row2");
+      byte [] cell1 = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "a");
+      byte [] cell2 = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "b");
+      byte [] cell3 = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "c");
+      
+      // write some data at two columns
+      batchUpdate = new BatchUpdate(row2);
+      batchUpdate.put(cell1, "column0 value".getBytes());
+      batchUpdate.put(cell2, "column1 value".getBytes());
+      region.batchUpdate(batchUpdate, null);
+      
+      // flush
+      region.flushcache();
+      
+      // assert i get both columns
+      results = region.getFull(row2, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
+      assertEquals("Should have two columns in the results map", 2, results.size());
+      assertEquals("column0 value", new String(results.get(cell1).getValue()));
+      assertEquals("column1 value", new String(results.get(cell2).getValue()));
+      
+      // write a delete for the first column
+      batchUpdate = new BatchUpdate(row2);
+      batchUpdate.delete(cell1);
+      batchUpdate.put(cell2, "column1 new value".getBytes());      
+      region.batchUpdate(batchUpdate, null);
+            
+      // flush
+      region.flushcache(); 
+      
+      // assert i get the second column only
+      results = region.getFull(row2, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
+      System.out.println(Bytes.toString(results.keySet().iterator().next()));
+      assertEquals("Should have one column in the results map", 1, results.size());
+      assertNull("column0 value", results.get(cell1));
+      assertEquals("column1 new value", new String(results.get(cell2).getValue()));
+      
+      //
+      // Include a delete and value from the memcache in the mix
+      //
+      batchUpdate = new BatchUpdate(row2);
+      batchUpdate.delete(cell2);
+      batchUpdate.put(cell3, "column3 value!".getBytes());
+      region.batchUpdate(batchUpdate, null);
+      
+      // assert i get the third column only
+      results = region.getFull(row2, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
+      assertEquals("Should have one column in the results map", 1, results.size());
+      assertNull("column0 value", results.get(cell1));
+      assertNull("column1 value", results.get(cell2));
+      assertEquals("column3 value!", new String(results.get(cell3).getValue()));
+      
+    } finally {
+      if (region != null) {
+        try {
+          region.close();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+        region.getLog().closeAndDelete();
+      }
+    }  
+  }
+
+  /** For HBASE-694 */
+  public void testGetClosestRowBefore2() throws IOException{
+
+    HRegion region = null;
+    BatchUpdate batchUpdate = null;
+    
+    try {
+      HTableDescriptor htd = createTableDescriptor(getName());
+      region = createNewHRegion(htd, null, null);
+     
+      // set up some test data
+      String t10 = "010";
+      String t20 = "020";
+      String t30 = "030";
+      String t40 = "040";
+      
+      batchUpdate = new BatchUpdate(t10);
+      batchUpdate.put(COLUMNS[0], "t10 bytes".getBytes());
+      region.batchUpdate(batchUpdate, null);
+      
+      batchUpdate = new BatchUpdate(t30);
+      batchUpdate.put(COLUMNS[0], "t30 bytes".getBytes());
+      region.batchUpdate(batchUpdate, null);
+      
+      batchUpdate = new BatchUpdate(t40);
+      batchUpdate.put(COLUMNS[0], "t40 bytes".getBytes());
+      region.batchUpdate(batchUpdate, null);
+
+      // try finding "035"
+      String t35 = "035";
+      Map<byte [], Cell> results = 
+        region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
+
+      region.flushcache();
+
+      // try finding "035"
+      results = region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
+
+      batchUpdate = new BatchUpdate(t20);
+      batchUpdate.put(COLUMNS[0], "t20 bytes".getBytes());
+      region.batchUpdate(batchUpdate, null);
+
+      // try finding "035"
+      results = region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
+
+      region.flushcache();
+
+      // try finding "035"
+      results = region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
+    } finally {
+      if (region != null) {
+        try {
+          region.close();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+        region.getLog().closeAndDelete();
+      }
+    }
+  }
+
   /**
    * Test for HBASE-808 and HBASE-809.
    * @throws Exception
@@ -349,13 +518,11 @@
   public void testGetClosestRowBefore() throws IOException{
 
     HRegion region = null;
-    HRegionIncommon region_incommon = null;
     BatchUpdate batchUpdate = null;
     
     try {
       HTableDescriptor htd = createTableDescriptor(getName());
       region = createNewHRegion(htd, null, null);
-      region_incommon = new HRegionIncommon(region);
      
       // set up some test data
       String t10 = "010";
@@ -438,71 +605,6 @@
     }
   }
 
-  /** For HBASE-694 */
-  public void testGetClosestRowBefore2() throws IOException{
-
-    HRegion region = null;
-    BatchUpdate batchUpdate = null;
-    
-    try {
-      HTableDescriptor htd = createTableDescriptor(getName());
-      region = createNewHRegion(htd, null, null);
-     
-      // set up some test data
-      String t10 = "010";
-      String t20 = "020";
-      String t30 = "030";
-      String t40 = "040";
-      
-      batchUpdate = new BatchUpdate(t10);
-      batchUpdate.put(COLUMNS[0], "t10 bytes".getBytes());
-      region.batchUpdate(batchUpdate, null);
-      
-      batchUpdate = new BatchUpdate(t30);
-      batchUpdate.put(COLUMNS[0], "t30 bytes".getBytes());
-      region.batchUpdate(batchUpdate, null);
-      
-      batchUpdate = new BatchUpdate(t40);
-      batchUpdate.put(COLUMNS[0], "t40 bytes".getBytes());
-      region.batchUpdate(batchUpdate, null);
-
-      // try finding "035"
-      String t35 = "035";
-      Map<byte [], Cell> results = 
-        region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
-      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
-
-      region.flushcache();
-
-      // try finding "035"
-      results = region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
-      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
-
-      batchUpdate = new BatchUpdate(t20);
-      batchUpdate.put(COLUMNS[0], "t20 bytes".getBytes());
-      region.batchUpdate(batchUpdate, null);
-      
-      // try finding "035"
-      results = region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
-      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
-      
-      region.flushcache();
-
-      // try finding "035"
-      results = region.getClosestRowBefore(Bytes.toBytes(t35), COLUMNS[0]);
-      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t30 bytes");
-    } finally {
-      if (region != null) {
-        try {
-          region.close();
-        } catch (Exception e) {
-          e.printStackTrace();
-        }
-        region.getLog().closeAndDelete();
-      }
-    }
-  }
-
   /**
    * For HBASE-40
    */
@@ -576,112 +678,7 @@
     assertNull(result.get(COLUMNS[1]));
     assertNull(result.get(COLUMNS[2]));    
   }  
-  
-  public void testGetFullMultiMapfile() throws IOException {
-    HRegion region = null;
-    HRegionIncommon region_incommon = null;
-    BatchUpdate batchUpdate = null;
-    Map<byte [], Cell> results = null;
-    
-    try {
-      HTableDescriptor htd = createTableDescriptor(getName());
-      region = createNewHRegion(htd, null, null);
-      region_incommon = new HRegionIncommon(region);
-           
-      //
-      // Test ordering issue
-      //
-      byte [] row = Bytes.toBytes("row1");
-     
-      // write some data
-      batchUpdate = new BatchUpdate(row);
-      batchUpdate.put(COLUMNS[0], "olderValue".getBytes());
-      region.batchUpdate(batchUpdate, null);
 
-      // flush
-      region.flushcache();
-      
-      // assert that getFull gives us the older value
-      results = region.getFull(row, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
-      assertEquals("olderValue", new String(results.get(COLUMNS[0]).getValue()));
-      
-      // write a new value for the cell
-      batchUpdate = new BatchUpdate(row);
-      batchUpdate.put(COLUMNS[0], "newerValue".getBytes());
-      region.batchUpdate(batchUpdate, null);
-      
-      // flush
-      region.flushcache();
-      
-      // assert that getFull gives us the later value
-      results = region.getFull(row, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
-      assertEquals("newerValue", new String(results.get(COLUMNS[0]).getValue()));
-     
-      //
-      // Test the delete masking issue
-      //
-      byte [] row2 = Bytes.toBytes("row2");
-      byte [] cell1 = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "a");
-      byte [] cell2 = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "b");
-      byte [] cell3 = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "c");
-      
-      // write some data at two columns
-      batchUpdate = new BatchUpdate(row2);
-      batchUpdate.put(cell1, "column0 value".getBytes());
-      batchUpdate.put(cell2, "column1 value".getBytes());
-      region.batchUpdate(batchUpdate, null);
-      
-      // flush
-      region.flushcache();
-      
-      // assert i get both columns
-      results = region.getFull(row2, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
-      assertEquals("Should have two columns in the results map", 2, results.size());
-      assertEquals("column0 value", new String(results.get(cell1).getValue()));
-      assertEquals("column1 value", new String(results.get(cell2).getValue()));
-      
-      // write a delete for the first column
-      batchUpdate = new BatchUpdate(row2);
-      batchUpdate.delete(cell1);
-      batchUpdate.put(cell2, "column1 new value".getBytes());      
-      region.batchUpdate(batchUpdate, null);
-            
-      // flush
-      region.flushcache(); 
-      
-      // assert i get the second column only
-      results = region.getFull(row2, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
-      assertEquals("Should have one column in the results map", 1, results.size());
-      assertNull("column0 value", results.get(cell1));
-      assertEquals("column1 new value", new String(results.get(cell2).getValue()));
-      
-      //
-      // Include a delete and value from the memcache in the mix
-      //
-      batchUpdate = new BatchUpdate(row2);
-      batchUpdate.delete(cell2);      
-      batchUpdate.put(cell3, "column2 value!".getBytes());
-      region.batchUpdate(batchUpdate, null);
-      
-      // assert i get the third column only
-      results = region.getFull(row2, (Set<byte []>)null, LATEST_TIMESTAMP, 1, null);
-      assertEquals("Should have one column in the results map", 1, results.size());
-      assertNull("column0 value", results.get(cell1));
-      assertNull("column1 value", results.get(cell2));
-      assertEquals("column2 value!", new String(results.get(cell3).getValue()));
-      
-    } finally {
-      if (region != null) {
-        try {
-          region.close();
-        } catch (Exception e) {
-          e.printStackTrace();
-        }
-        region.getLog().closeAndDelete();
-      }
-    }  
-  }
-  
   private void assertColumnsPresent(final HRegion r, final byte [] row)
   throws IOException {
     Map<byte [], Cell> result = 
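
Note: most of the TestGet2 churn is movement: testGetFullMultiMapfile and
testGetClosestRowBefore2 migrate to the top of the file, shedding unused
HRegionIncommon locals on the way. The behaviour they pin down deserves
one line of illustration; given rows "010", "030" and "040" written as in
the test:

    // Looking up "035" resolves to the nearest preceding row, "030",
    // whether that row is still in the memcache or already flushed.
    Map<byte [], Cell> results =
      region.getClosestRowBefore(Bytes.toBytes("035"), COLUMNS[0]);
    assertEquals("t30 bytes", new String(results.get(COLUMNS[0]).getValue()));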

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java Wed Feb 25 05:34:29 2009
@@ -134,7 +134,8 @@
         assertTrue(Bytes.equals(tableName, key.getTablename()));
         assertTrue(Bytes.equals(HLog.METAROW, key.getRow()));
         assertTrue(Bytes.equals(HLog.METACOLUMN, val.getColumn()));
-        assertEquals(0, HLogEdit.completeCacheFlush.compareTo(val.getVal()));
+        assertEquals(0, Bytes.compareTo(HLogEdit.COMPLETE_CACHE_FLUSH,
+          val.getVal()));
         System.out.println(key + " " + val);
       }
     } finally {
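
Note: same pattern as DELETED_BYTES above. The cache-flush marker is now
the raw byte [] HLogEdit.COMPLETE_CACHE_FLUSH, so the assertion switches
from a wrapper compareTo to Bytes.compareTo, where equal arrays compare
as 0:

    assertEquals(0, Bytes.compareTo(HLogEdit.COMPLETE_CACHE_FLUSH,
      val.getVal()));  // val is the HLogEdit read back from the log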

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java Wed Feb 25 05:34:29 2009
@@ -273,7 +273,7 @@
     hmemcache.add(getHSKForRow(t20), "t20 bytes".getBytes());
     hmemcache.add(getHSKForRow(t30), "t30 bytes".getBytes());
     // write a delete in there to see if things still work ok
-    hmemcache.add(getHSKForRow(t35), HLogEdit.deleteBytes.get());
+    hmemcache.add(getHSKForRow(t35), HLogEdit.DELETED_BYTES);
     hmemcache.add(getHSKForRow(t40), "t40 bytes".getBytes());
     
     SortedMap<HStoreKey, Long> results = null;

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java Wed Feb 25 05:34:29 2009
@@ -146,7 +146,7 @@
     byte [] collabel = null;
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
       byte [] rowlabel = Bytes.toBytes("row_" + k);
-
+      if (k % 100 == 0) LOG.info(Bytes.toString(rowlabel));
       byte [] bodydata = region.get(rowlabel, CONTENTS_BASIC).getValue();
       assertNotNull(bodydata);
       String bodystr = new String(bodydata, HConstants.UTF8_ENCODING).trim();
@@ -381,7 +381,7 @@
       numInserted += 2;
     }
 
-    LOG.info("Write " + (vals1.length / 2) + " rows. Elapsed time: "
+    LOG.info("Write " + (vals1.length / 2) + " rows (second half). Elapsed time: "
         + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     // 6.  Scan from cache and disk
@@ -464,19 +464,16 @@
     } finally {
       s.close();
     }
-    assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
-    
+    assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched,
+      numInserted, numFetched);
     LOG.info("Scanned " + vals1.length
         + " rows from disk. Elapsed time: "
         + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     // 9. Scan with a starting point
-
     startTime = System.currentTimeMillis();
-    
     s = r.getScanner(cols, Bytes.toBytes("row_vals1_500"),
         System.currentTimeMillis(), null);
-    
     numFetched = 0;
     try {
       HStoreKey curKey = new HStoreKey();
@@ -503,7 +500,8 @@
     } finally {
       s.close();
     }
-    assertEquals("Should have fetched " + (numInserted / 2) + " values, but fetched " + numFetched, (numInserted / 2), numFetched);
+    assertEquals("Should have fetched " + (numInserted / 2) +
+      " values, but fetched " + numFetched, (numInserted / 2), numFetched);
     
     LOG.info("Scanned " + (numFetched / 2)
         + " rows from disk with specified start point. Elapsed time: "
@@ -515,30 +513,27 @@
   // NOTE: This test depends on testBatchWrite succeeding
   private void splitAndMerge() throws IOException {
     Path oldRegionPath = r.getRegionDir();
-    byte [] midKey = r.compactStores();
-    assertNotNull(midKey);
+    byte [] splitRow = r.compactStores();
+    assertNotNull(splitRow);
     long startTime = System.currentTimeMillis();
-    HRegion subregions[] = r.splitRegion(midKey);
+    HRegion subregions[] = r.splitRegion(splitRow);
     if (subregions != null) {
       LOG.info("Split region elapsed time: "
           + ((System.currentTimeMillis() - startTime) / 1000.0));
-
       assertEquals("Number of subregions", subregions.length, 2);
-
       for (int i = 0; i < subregions.length; i++) {
         subregions[i] = openClosedRegion(subregions[i]);
         subregions[i].compactStores();
       }
       
       // Now merge it back together
-
       Path oldRegion1 = subregions[0].getRegionDir();
       Path oldRegion2 = subregions[1].getRegionDir();
       startTime = System.currentTimeMillis();
       r = HRegion.mergeAdjacent(subregions[0], subregions[1]);
       region = new HRegionIncommon(r);
-      LOG.info("Merge regions elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
+      LOG.info("Merge regions elapsed time: " +
+        ((System.currentTimeMillis() - startTime) / 1000.0));
       fs.delete(oldRegion1, true);
       fs.delete(oldRegion2, true);
       fs.delete(oldRegionPath, true);
@@ -598,8 +593,10 @@
         curVals.clear();
         k++;
       }
-      assertEquals("Expected " + NUM_VALS + " " + CONTENTS_BASIC + " values, but fetched " + contentsFetched, NUM_VALS, contentsFetched);
-      assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM + " values, but fetched " + anchorFetched, NUM_VALS, anchorFetched);
+      assertEquals("Expected " + NUM_VALS + " " + Bytes.toString(CONTENTS_BASIC) +
+        " values, but fetched " + contentsFetched, NUM_VALS, contentsFetched);
+      assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM +
+        " values, but fetched " + anchorFetched, NUM_VALS, anchorFetched);
 
       LOG.info("Scanned " + NUM_VALS
           + " rows from disk. Elapsed time: "
@@ -673,8 +670,8 @@
         }
         curVals.clear();
       }
-      assertEquals("Inserted " + (NUM_VALS + numInserted/2) + " values, but fetched " + fetched, (NUM_VALS + numInserted/2), fetched);
-
+      assertEquals("Inserted " + (NUM_VALS + numInserted/2) +
+        " values, but fetched " + fetched, (NUM_VALS + numInserted/2), fetched);
       LOG.info("Scanned " + fetched
           + " rows from disk. Elapsed time: "
           + ((System.currentTimeMillis() - startTime) / 1000.0));
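
Note: TestHRegion's splitAndMerge path picks up the splitRow naming and
round-trips a region: compact to obtain a split row, split, compact the
daughters, then merge them back with HRegion.mergeAdjacent. A sketch,
using the test's openClosedRegion helper and assuming splitRow came from
compactStores() as above:

    HRegion [] halves = r.splitRegion(splitRow);
    for (int i = 0; i < halves.length; i++) {
      halves[i] = openClosedRegion(halves[i]);  // reopen the closed daughter
      halves[i].compactStores();
    }
    HRegion merged = HRegion.mergeAdjacent(halves[0], halves[1]);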

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java Wed Feb 25 05:34:29 2009
@@ -154,7 +154,8 @@
     this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
     HRegionIncommon hri = new HRegionIncommon(r);
     try {
-      addContent(hri, Bytes.toString(HConstants.COL_REGIONINFO));
+      LOG.info("Added: " + 
+        addContent(hri, Bytes.toString(HConstants.COL_REGIONINFO)));
       int count = count(hri, -1);
       assertEquals(count, count(hri, 100));
       assertEquals(count, count(hri, 0));

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java Wed Feb 25 05:34:29 2009
@@ -133,11 +133,12 @@
   }
   
   private void basicSplit(final HRegion region) throws Exception {
-    addContent(region, COLFAMILY_NAME3);
+    LOG.info("" + addContent(region, COLFAMILY_NAME3));
     region.flushcache();
-    byte [] midkey = region.compactStores();
-    assertNotNull(midkey);
-    HRegion [] regions = split(region, midkey);
+    byte [] splitRow = region.compactStores();
+    assertNotNull(splitRow);
+    LOG.info("SplitRow: " + Bytes.toString(splitRow));
+    HRegion [] regions = split(region, splitRow);
     try {
       // Need to open the regions.
       // TODO: Add an 'open' to HRegion... don't do open by constructing
@@ -148,11 +149,11 @@
       // Assert can get rows out of new regions. Should be able to get first
       // row from first region and the midkey from second region.
       assertGet(regions[0], COLFAMILY_NAME3, Bytes.toBytes(START_KEY));
-      assertGet(regions[1], COLFAMILY_NAME3, midkey);
+      assertGet(regions[1], COLFAMILY_NAME3, splitRow);
       // Test I can get scanner and that it starts at right place.
       assertScan(regions[0], COLFAMILY_NAME3,
           Bytes.toBytes(START_KEY));
-      assertScan(regions[1], COLFAMILY_NAME3, midkey);
+      assertScan(regions[1], COLFAMILY_NAME3, splitRow);
       // Now prove can't split regions that have references.
       for (int i = 0; i < regions.length; i++) {
         // Add so much data to this region, we create a store file that is >
@@ -251,11 +252,11 @@
     }
   }
   
-  private HRegion [] split(final HRegion r, final byte [] midKey)
+  private HRegion [] split(final HRegion r, final byte [] splitRow)
   throws IOException {
     // Assert can get mid key from passed region.
-    assertGet(r, COLFAMILY_NAME3, midKey);
-    HRegion [] regions = r.splitRegion(midKey);
+    assertGet(r, COLFAMILY_NAME3, splitRow);
+    HRegion [] regions = r.splitRegion(splitRow);
     assertEquals(regions.length, 2);
     return regions;
   }
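
Note: TestSplit's midkey-to-splitRow rename names the return value of
compactStores() for what it is: the row the region will split on, rather
than a computed midpoint. splitRegion(splitRow) then produces two
daughters meeting at that row; the assertScan calls above check that
regions[1] starts at splitRow. The flow, sketched:

    byte [] splitRow = region.compactStores();
    assertNotNull(splitRow);  // data was loaded, so a split row exists
    HRegion [] daughters = region.splitRegion(splitRow);
    assertEquals(2, daughters.length);
    // daughters[0] covers [start, splitRow); daughters[1] covers
    // [splitRow, end).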

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java Wed Feb 25 05:34:29 2009
@@ -83,4 +83,4 @@
       HConstants.FOREVER, false));
     return createNewHRegion(htd, null, null);
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=747666&r1=747665&r2=747666&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java Wed Feb 25 05:34:29 2009
@@ -52,14 +52,12 @@
     this.conf.set("hbase.hstore.compactionThreshold", "2");
 
     // Create table description
-    
     this.desc = new HTableDescriptor("TestMergeTool");
     this.desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
 
     /*
      * Create the HRegionInfos for the regions.
      */
-    
     // Region 0 will contain the key range [row_0200,row_0300)
     sourceRegions[0] = new HRegionInfo(this.desc, Bytes.toBytes("row_0200"),
       Bytes.toBytes("row_0300"));
@@ -89,10 +87,10 @@
      */
     this.rows = new byte [5][][];
     this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" });
-    this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350" });
-    this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175" });
-    this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560" });
-    this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000" });
+    this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350", "row_035" });
+    this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175", "row_0175", "row_0175"});
+    this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560", "row_0560", "row_0560", "row_0560"});
+    this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000", "row_1000", "row_1000", "row_1000", "row_1000" });
     
     // Start up dfs
     this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);