Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2007/06/11 18:46:31 UTC

svn commit: r546192 [3/3] - in /lucene/hadoop/trunk/src/contrib/hbase: ./ conf/ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java Mon Jun 11 09:46:27 2007
@@ -15,8 +15,6 @@
  */
 package org.apache.hadoop.hbase;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.*;
 
 import java.io.*;
@@ -25,9 +23,15 @@
  * A Key for a stored row
  ******************************************************************************/
 public class HStoreKey implements WritableComparable {
-  private final Log LOG = LogFactory.getLog(this.getClass().getName());
   
-  public static Text extractFamily(Text col) throws IOException {
+  /**
+   * Extracts the column family name from a column.
+   * For example, returns 'info' if the specified column was 'info:server'.
+   * 
+   * @param col         - name of column
+   * @return            - column family name
+   */
+  public static Text extractFamily(Text col) {
     String column = col.toString();
     int colpos = column.indexOf(":");
     if(colpos < 0) {
@@ -40,56 +44,126 @@
   Text column;
   long timestamp;
 
+  /** Default constructor used in conjunction with Writable interface */
   public HStoreKey() {
     this.row = new Text();
     this.column = new Text();
     this.timestamp = Long.MAX_VALUE;
   }
   
+  /**
+   * Create an HStoreKey specifying only the row
+   * The column defaults to the empty string and the timestamp defaults to
+   * Long.MAX_VALUE
+   * 
+   * @param row - row key
+   */
   public HStoreKey(Text row) {
     this.row = new Text(row);
     this.column = new Text();
     this.timestamp = Long.MAX_VALUE;
   }
   
+  /**
+   * Create an HStoreKey specifying the row and timestamp
+   * The column name defaults to the empty string
+   * 
+   * @param row         - row key
+   * @param timestamp   - timestamp value
+   */
   public HStoreKey(Text row, long timestamp) {
     this.row = new Text(row);
     this.column = new Text();
     this.timestamp = timestamp;
   }
   
+  /**
+   * Create an HStoreKey specifying the row and column names
+   * The timestamp defaults to Long.MAX_VALUE
+   * 
+   * @param row         - row key
+   * @param column      - column key
+   */
   public HStoreKey(Text row, Text column) {
     this.row = new Text(row);
     this.column = new Text(column);
     this.timestamp = Long.MAX_VALUE;
   }
   
+  /**
+   * Create an HStoreKey specifying all the fields
+   * 
+   * @param row         - row key
+   * @param column      - column key
+   * @param timestamp   - timestamp value
+   */
   public HStoreKey(Text row, Text column, long timestamp) {
     this.row = new Text(row);
     this.column = new Text(column);
     this.timestamp = timestamp;
   }
   
+  /**
+   * Construct a new HStoreKey from another
+   * 
+   * @param other - the source key
+   */
+  public HStoreKey(HStoreKey other) {
+    this();
+    this.row.set(other.row);
+    this.column.set(other.column);
+    this.timestamp = other.timestamp;
+  }
+  
+  /**
+   * Change the value of the row key
+   * 
+   * @param newrow      - new row key value
+   */
   public void setRow(Text newrow) {
     this.row.set(newrow);
   }
   
+  /**
+   * Change the value of the column key
+   * 
+   * @param newcol      - new column key value
+   */
   public void setColumn(Text newcol) {
     this.column.set(newcol);
   }
   
+  /**
+   * Change the value of the timestamp field
+   * 
+   * @param timestamp   - new timestamp value
+   */
   public void setVersion(long timestamp) {
     this.timestamp = timestamp;
   }
   
+  /**
+   * Set the value of this HStoreKey from the supplied key
+   * 
+   * @param k - key value to copy
+   */
+  public void set(HStoreKey k) {
+    this.row.set(k.getRow());
+    this.column.set(k.getColumn());
+    this.timestamp = k.getTimestamp();
+  }
+  
+  /** @return value of row key */
   public Text getRow() {
     return row;
   }
   
+  /** @return value of column key */
   public Text getColumn() {
     return column;
   }
   
+  /** @return value of timestamp */
   public long getTimestamp() {
     return timestamp;
   }
@@ -125,18 +199,12 @@
    * @see #matchesWithoutColumn(HStoreKey)
    */
   public boolean matchesRowFamily(HStoreKey other) {
-    boolean status = false;
-    try {
-      status = this.row.compareTo(other.row) == 0
+    return this.row.compareTo(other.row) == 0
         && extractFamily(this.column).compareTo(
             extractFamily(other.getColumn())) == 0;
-      
-    } catch(IOException e) {
-      LOG.error(e);
-    }
-    return status;
   }
   
+  @Override
   public String toString() {
     return row.toString() + "/" + column.toString() + "/" + timestamp;
   }
@@ -158,6 +226,9 @@
   // Comparable
   //////////////////////////////////////////////////////////////////////////////
 
+  /* (non-Javadoc)
+   * @see java.lang.Comparable#compareTo(java.lang.Object)
+   */
   public int compareTo(Object o) {
     HStoreKey other = (HStoreKey) o;
     int result = this.row.compareTo(other.row);
@@ -180,12 +251,18 @@
   // Writable
   //////////////////////////////////////////////////////////////////////////////
 
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
+   */
   public void write(DataOutput out) throws IOException {
     row.write(out);
     column.write(out);
     out.writeLong(timestamp);
   }
 
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
+   */
   public void readFields(DataInput in) throws IOException {
     row.readFields(in);
     column.readFields(in);
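
For illustration, not part of this commit: a minimal sketch of round-tripping
an HStoreKey through the Writable interface. Only constructors and methods
shown in the diff above are assumed.

import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.io.Text;

import java.io.*;

public class HStoreKeyExample {
  public static void main(String[] args) throws IOException {
    HStoreKey key = new HStoreKey(new Text("row1"), new Text("info:server"),
        System.currentTimeMillis());

    // Serialize the key as a region server would.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    key.write(new DataOutputStream(bytes));

    // Deserialize into a fresh key; row, column and timestamp survive.
    HStoreKey copy = new HStoreKey();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy);                  // row1/info:server/<timestamp>
    System.out.println(HStoreKey.extractFamily(copy.getColumn()));  // info
  }
}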

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Mon Jun 11 09:46:27 2007
@@ -43,7 +43,8 @@
    */
   private static final Pattern LEGAL_TABLE_NAME =
     Pattern.compile("[\\w-]+");
-  
+
+  /** Constructs an empty object */
   public HTableDescriptor() {
     this.name = new Text();
     this.families = new TreeMap<Text, HColumnDescriptor>();
@@ -66,6 +67,7 @@
     this.families = new TreeMap<Text, HColumnDescriptor>();
   }
 
+  /** @return name of table */
   public Text getName() {
     return name;
   }
@@ -78,7 +80,12 @@
     families.put(family.getName(), family);
   }
 
-  /** Do we contain a given column? */
+  /**
+   * Checks to see if this table contains the given column family
+   * 
+   * @param family - family name
+   * @return true if the table contains the specified family name
+   */
   public boolean hasFamily(Text family) {
     return families.containsKey(family);
   }
@@ -87,6 +94,8 @@
    * 
    *  TODO: What is this used for? Seems Dangerous to let people play with our
    *  private members.
+   *  
+   *  @return map of family members
    */
   public TreeMap<Text, HColumnDescriptor> families() {
     return families;
@@ -95,7 +104,7 @@
   @Override
   public String toString() {
     return "name: " + this.name.toString() + ", families: " + this.families;
-  }
+  }
   
   @Override
   public boolean equals(Object obj) {
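
For illustration, not part of this commit: a short sketch of the descriptor
API documented above. Whether the stored family key keeps its trailing ':'
depends on HColumnDescriptor.getName(), which is not shown in this diff, so
the hasFamily() lookup is illustrative only.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.io.Text;

public class TableDescriptorExample {
  public static void main(String[] args) {
    HTableDescriptor desc = new HTableDescriptor("test"); // must match [\w-]+
    desc.addFamily(new HColumnDescriptor("contents:"));

    // hasFamily() checks the families map populated by addFamily().
    System.out.println(desc.hasFamily(new Text("contents:")));
    System.out.println(desc);   // name: test, families: {...}
  }
}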

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java?view=auto&rev=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/KeyedData.java Mon Jun 11 09:46:27 2007
@@ -0,0 +1,74 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+import org.apache.hadoop.io.*;
+
+import java.io.*;
+
+/*******************************************************************************
+ * KeyedData is just a data pair.
+ * It includes an HStoreKey and some associated data.
+ ******************************************************************************/
+public class KeyedData implements Writable {
+  HStoreKey key;
+  BytesWritable data;
+
+  /** Default constructor. Used by Writable interface */
+  public KeyedData() {
+    this.key = new HStoreKey();
+    this.data = new BytesWritable();
+  }
+
+  /**
+   * Create a KeyedData object specifying the parts
+   * @param key         - HStoreKey
+   * @param data        - BytesWritable
+   */
+  public KeyedData(HStoreKey key, BytesWritable data) {
+    this.key = key;
+    this.data = data;
+  }
+
+  /** @return the key */
+  public HStoreKey getKey() {
+    return key;
+  }
+
+  /** @return the value */
+  public BytesWritable getData() {
+    return data;
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////////////////////////////////
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
+   */
+  public void write(DataOutput out) throws IOException {
+    key.write(out);
+    data.write(out);
+  }
+  
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
+   */
+  public void readFields(DataInput in) throws IOException {
+    key.readFields(in);
+    data.readFields(in);
+  }
+}
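
For illustration, not part of this commit: KeyedData pairs an HStoreKey with
its value bytes. The sketch mirrors how the rewritten scan loop in
TestScanner2 (further down in this commit) consumes it; the value bytes here
are made up.

import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.KeyedData;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;

public class KeyedDataExample {
  public static void main(String[] args) {
    HStoreKey key = new HStoreKey(new Text("row1"), new Text("info:server"));
    KeyedData kd =
      new KeyedData(key, new BytesWritable("10.0.0.1:60020".getBytes()));

    // The column now travels inside the key, so consumers read it from
    // there and copy the data out of the BytesWritable.
    byte[] bytes = new byte[kd.getData().getSize()];
    System.arraycopy(kd.getData().get(), 0, bytes, 0, bytes.length);
    System.out.println(kd.getKey().getColumn() + " = " + new String(bytes));
  }
}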

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/LeaseListener.java Mon Jun 11 09:46:27 2007
@@ -17,25 +17,14 @@
 
 
 /*******************************************************************************
- * LeaseListener is a small class meant to be overridden by users of the Leases 
+ * LeaseListener is an interface meant to be implemented by users of the Leases 
  * class.
  *
  * It receives events from the Leases class about the status of its accompanying
  * lease.  Users of the Leases class can use a LeaseListener subclass to, for 
  * example, clean up resources after a lease has expired.
  ******************************************************************************/
-public abstract class LeaseListener {
-  public LeaseListener() {
-  }
-
-  public void leaseRenewed() {
-  }
-
-  /** When the user cancels a lease, this method is called. */
-  public void leaseCancelled() {
-  }
-
+public interface LeaseListener {
   /** When a lease expires, this method is called. */
-  public void leaseExpired() {
-  }
+  public void leaseExpired();
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java Mon Jun 11 09:46:27 2007
@@ -48,7 +48,12 @@
   TreeSet<Lease> sortedLeases = new TreeSet<Lease>();
   boolean running = true;
 
-  /** Indicate the length of the lease, in milliseconds */
+  /**
+   * Creates a lease
+   * 
+   * @param leasePeriod - length of time (milliseconds) that the lease is valid
+   * @param leaseCheckFrequency - how often the lease should be checked (milliseconds)
+   */
   public Leases(long leasePeriod, long leaseCheckFrequency) {
     this.leasePeriod = leasePeriod;
     this.leaseCheckFrequency = leaseCheckFrequency;
@@ -59,7 +64,7 @@
   }
 
   /**
-   * Shut down this Leases outfit.  All pending leases will be destroyed, 
+   * Shut down this Leases instance.  All pending leases will be destroyed, 
    * without any cancellation calls.
    */
   public void close() {
@@ -89,15 +94,21 @@
   }
 
-  /** A client obtains a lease... */
+  /**
+   * Obtain a lease
+   * 
+   * @param holderId - name of lease holder
+   * @param resourceId - resource being leased
+   * @param listener - listener that will process lease expirations
+   */
   public void createLease(Text holderId, Text resourceId,
-      final LeaseListener listener)
-  throws IOException {
+      final LeaseListener listener) {
     synchronized(leases) {
       synchronized(sortedLeases) {
         Lease lease = new Lease(holderId, resourceId, listener);
         Text leaseId = lease.getLeaseId();
         if(leases.get(leaseId) != null) {
-          throw new IOException("Impossible state for createLease(): Lease " +
+          throw new AssertionError("Impossible state for createLease(): Lease " +
             getLeaseName(holderId, resourceId) + " is still held.");
         }
         leases.put(leaseId, lease);
@@ -110,6 +121,13 @@
   }
   
-  /** A client renews a lease... */
+  /**
+   * Renew a lease
+   * 
+   * @param holderId - name of lease holder
+   * @param resourceId - resource being leased
+   * @throws IOException
+   */
   public void renewLease(Text holderId, Text resourceId) throws IOException {
     synchronized(leases) {
       synchronized(sortedLeases) {
@@ -132,8 +150,12 @@
     }
   }
 
-  /** A client explicitly cancels a lease.
-   * The lease-cleanup method is not called.
+  /**
+   * Client explicitly cancels a lease.
+   * 
+   * @param holderId - name of lease holder
+   * @param resourceId - resource being leased
+   * @throws IOException
    */
   public void cancelLease(Text holderId, Text resourceId) throws IOException {
     synchronized(leases) {
@@ -152,7 +174,6 @@
         sortedLeases.remove(lease);
         leases.remove(leaseId);
 
-        lease.cancelled();
       }
     }     
     if (LOG.isDebugEnabled()) {
@@ -197,37 +218,33 @@
   }
 
   /** This class tracks a single Lease. */
-  class Lease implements Comparable {
+  @SuppressWarnings("unchecked")
+  private class Lease implements Comparable {
     Text holderId;
     Text resourceId;
     LeaseListener listener;
     long lastUpdate;
 
-    public Lease(Text holderId, Text resourceId, LeaseListener listener) {
+    Lease(Text holderId, Text resourceId, LeaseListener listener) {
       this.holderId = holderId;
       this.resourceId = resourceId;
       this.listener = listener;
       renew();
     }
     
-    public Text getLeaseId() {
+    Text getLeaseId() {
       return createLeaseId(holderId, resourceId);
     }
     
-    public boolean shouldExpire() {
+    boolean shouldExpire() {
       return (System.currentTimeMillis() - lastUpdate > leasePeriod);
     }
     
-    public void renew() {
+    void renew() {
       this.lastUpdate = System.currentTimeMillis();
-      listener.leaseRenewed();
-    }
-    
-    public void cancelled() {
-      listener.leaseCancelled();
     }
     
-    public void expired() {
+    void expired() {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Lease expired " + getLeaseName(this.holderId,
           this.resourceId));
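
For illustration, not part of this commit: with LeaseListener now an
interface, a listener can be supplied inline. A minimal sketch, assuming the
lease monitor thread is started by the Leases constructor as in this
revision; holder and resource names are hypothetical.

import org.apache.hadoop.hbase.LeaseListener;
import org.apache.hadoop.hbase.Leases;
import org.apache.hadoop.io.Text;

public class LeaseExample {
  public static void main(String[] args) throws Exception {
    // Leases valid for 30 seconds, checked once a second.
    Leases leases = new Leases(30 * 1000L, 1000L);

    Text holder = new Text("client-1");
    Text resource = new Text("scanner-42");

    // createLease() no longer throws IOException after this change.
    leases.createLease(holder, resource, new LeaseListener() {
      public void leaseExpired() {
        System.out.println("lease expired; clean up the scanner here");
      }
    });

    leases.renewLease(holder, resource);   // push back the expiration
    leases.cancelLease(holder, resource);  // done; no expiry callback fires
    leases.close();
  }
}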

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionServerRunningException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionServerRunningException.java?view=auto&rev=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionServerRunningException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionServerRunningException.java Mon Jun 11 09:46:27 2007
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+/**
+ * Thrown if the region server log directory exists (which indicates another
+ * region server is running at the same address)
+ */
+public class RegionServerRunningException extends IOException {
+  private static final long serialVersionUID = (1L << 31) - 1L;
+  
+  /** Default Constructor */
+  public RegionServerRunningException() {
+    super();
+  }
+
+  /**
+   * Constructs the exception and supplies a string as the message
+   * @param s - message
+   */
+  public RegionServerRunningException(String s) {
+    super(s);
+  }
+
+}
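
For illustration, not part of this commit: a hypothetical startup check of
the kind this exception is meant for. The method and address below are
invented for the example.

import org.apache.hadoop.hbase.RegionServerRunningException;

public class StartupCheck {
  static void checkLogDir(boolean logDirExists, String address)
      throws RegionServerRunningException {
    if (logDirExists) {
      // Another region server probably owns this address.
      throw new RegionServerRunningException(
          "region server already running at " + address);
    }
  }

  public static void main(String[] args) {
    try {
      checkLogDir(true, "10.0.0.1:60020");
    } catch (RegionServerRunningException e) {
      System.out.println("startup refused: " + e.getMessage());
    }
  }
}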

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java Mon Jun 11 09:46:27 2007
@@ -19,6 +19,7 @@
 import java.io.UnsupportedEncodingException;
 import java.util.Random;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -36,6 +37,7 @@
   protected FileSystem fs;
   protected Path dir;
 
+  @Override
   public void setUp() throws Exception {
     super.setUp();
     rand = new Random();
@@ -87,23 +89,19 @@
       // Now create the root and meta regions and insert the data regions
       // created above into the meta
       
-      HRegion root = HRegion.createNewHRegion(fs, dir, conf, 
-          HGlobals.rootTableDesc, 0L, null, null);
-      HRegion meta = HRegion.createNewHRegion(fs, dir, conf,
-          HGlobals.metaTableDesc, 1L, null, null);
+      HRegion root = createNewHRegion(fs, dir, conf, HGlobals.rootTableDesc, 0L, null, null);
+      HRegion meta = createNewHRegion(fs, dir, conf, HGlobals.metaTableDesc, 1L, null, null);
     
-      HRegion.addRegionToMeta(root, meta);
+      HRegion.addRegionToMETA(root, meta);
       
       for(int i = 0; i < regions.length; i++) {
-        HRegion.addRegionToMeta(meta, regions[i]);
+        HRegion.addRegionToMETA(meta, regions[i]);
       }
       
       root.close();
-      root.getLog().close();
-      fs.delete(new Path(root.getRegionDir(), HConstants.HREGION_LOGDIR_NAME));
+      root.getLog().closeAndDelete();
       meta.close();
-      meta.getLog().close();
-      fs.delete(new Path(meta.getRegionDir(), HConstants.HREGION_LOGDIR_NAME));
+      meta.getLog().closeAndDelete();
       
     } catch(Throwable t) {
       t.printStackTrace();
@@ -111,6 +109,7 @@
     }
   }
 
+  @Override
   public void tearDown() throws Exception {
     super.tearDown();
     dfsCluster.shutdown();
@@ -118,8 +117,7 @@
 
   private HRegion createAregion(Text startKey, Text endKey, int firstRow, int nrows)
       throws IOException {
-    HRegion region = HRegion.createNewHRegion(fs, dir, conf, desc,
-        rand.nextLong(), startKey, endKey);
+    HRegion region = createNewHRegion(fs, dir, conf, desc, rand.nextLong(), startKey, endKey);
     
     System.out.println("created region " + region.getRegionName());
 
@@ -138,9 +136,22 @@
     region.log.rollWriter();
     region.compactStores();
     region.close();
-    region.getLog().close();
-    fs.delete(new Path(region.getRegionDir(), HConstants.HREGION_LOGDIR_NAME));
+    region.getLog().closeAndDelete();
     region.getRegionInfo().offLine = true;
     return region;
   }
+
+  private HRegion createNewHRegion(FileSystem fs, Path dir,
+      Configuration conf, HTableDescriptor desc, long regionId, Text startKey,
+      Text endKey) throws IOException {
+    
+    HRegionInfo info = new HRegionInfo(regionId, desc, startKey, endKey);
+    Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
+    fs.mkdirs(regionDir);
+
+    return new HRegion(dir,
+      new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME), conf),
+      fs, conf, info, null);
+  }
+  
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java Mon Jun 11 09:46:27 2007
@@ -22,10 +22,16 @@
 public abstract class HBaseClusterTestCase extends HBaseTestCase {
   protected MiniHBaseCluster cluster;
   final boolean miniHdfs;
+  int regionServers;
   
   protected HBaseClusterTestCase() {
     this(true);
   }
+  
+  protected HBaseClusterTestCase(int regionServers) {
+    this(true);
+    this.regionServers = regionServers;
+  }
 
   protected HBaseClusterTestCase(String name) {
     this(name, true);
@@ -34,18 +40,23 @@
   protected HBaseClusterTestCase(final boolean miniHdfs) {
     super();
     this.miniHdfs = miniHdfs;
+    this.regionServers = 1;
   }
 
   protected HBaseClusterTestCase(String name, final boolean miniHdfs) {
     super(name);
     this.miniHdfs = miniHdfs;
+    this.regionServers = 1;
   }
 
+  @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.cluster = new MiniHBaseCluster(this.conf, 1, this.miniHdfs);
+    this.cluster =
+      new MiniHBaseCluster(this.conf, this.regionServers, this.miniHdfs);
   }
 
+  @Override
   public void tearDown() throws Exception {
     super.tearDown();
     if (this.cluster != null) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Mon Jun 11 09:46:27 2007
@@ -38,7 +38,7 @@
   private HMaster master;
   private Thread masterThread;
   private HRegionServer[] regionServers;
-  private Thread[] regionThreads;
+  Thread[] regionThreads;
   
   /**
    * Starts a MiniHBaseCluster on top of a new MiniDFSCluster
@@ -94,7 +94,7 @@
     try {
       try {
         this.fs = FileSystem.get(conf);
-        this.parentdir = new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
+        this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
         fs.mkdirs(parentdir);
 
       } catch(Throwable e) {
@@ -146,11 +146,37 @@
   }
   
   /** 
-   * Returns the rpc address actually used by the master server, because the 
-   * supplied port is not necessarily the actual port used.
+   * @return the rpc address actually used by the master server, because
+   * the supplied port is not necessarily the actual port used.
    */
   public HServerAddress getHMasterAddress() {
     return master.getMasterAddress();
+  }
+  
+  /**
+   * Shut down the specified region server cleanly
+   * 
+   * @param serverNumber
+   */
+  public void stopRegionServer(int serverNumber) {
+    if(serverNumber >= regionServers.length) {
+      throw new ArrayIndexOutOfBoundsException(
+          "serverNumber > number of region servers");
+    }
+    this.regionServers[serverNumber].stop();
+  }
+  
+  /**
+   * Cause a region server to exit without cleaning up
+   * 
+   * @param serverNumber
+   */
+  public void abortRegionServer(int serverNumber) {
+    if(serverNumber >= regionServers.length) {
+      throw new ArrayIndexOutOfBoundsException(
+          "serverNumber > number of region servers");
+    }
+    this.regionServers[serverNumber].abort();
   }
   
   /** Shut down the HBase cluster */

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java?view=auto&rev=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCleanRegionServerExit.java Mon Jun 11 09:46:27 2007
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+/** Tests region server failover when a region server exits cleanly */
+public class TestCleanRegionServerExit extends HBaseClusterTestCase {
+
+  private HClient client;
+  
+  /** Constructor */
+  public TestCleanRegionServerExit() {
+    super(2);                                   // Start two region servers
+    client = new HClient(conf);
+  }
+  
+  /** The test */
+  public void testCleanRegionServerExit() {
+    try {
+      // When the META table can be opened, the region servers are running
+      
+      client.openTable(HConstants.META_TABLE_NAME);
+      
+    } catch(IOException e) {
+      e.printStackTrace();
+      fail();
+    }
+    
+    // Shut down a region server cleanly
+    
+    this.cluster.stopRegionServer(0);
+    try {
+      this.cluster.regionThreads[0].join();
+      
+    } catch(InterruptedException e) {
+    }
+    
+    try {
+      Thread.sleep(60000);              // Wait for cluster to adjust
+      
+    } catch(InterruptedException e) {
+    }
+  }
+
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java Mon Jun 11 09:46:27 2007
@@ -27,6 +27,7 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
+/** Test case for get */
 public class TestGet extends HBaseTestCase {
   private static final Text CONTENTS = new Text("contents:");
   private static final Text ROW_KEY = new Text(HGlobals.rootRegionInfo.regionName);
@@ -59,6 +60,10 @@
     }
   }
   
+  /** 
+   * The test
+   * @throws IOException
+   */
   public void testGet() throws IOException {
     MiniDFSCluster cluster = null;
 
@@ -81,7 +86,7 @@
       
       HLog log = new HLog(fs, new Path(regionDir, "log"), conf);
 
-      HRegion r = new HRegion(dir, log, fs, conf, info, null, null);
+      HRegion r = new HRegion(dir, log, fs, conf, info, null);
       
       // Write information to the table
       
@@ -126,7 +131,7 @@
       
       r.close();
       log.rollWriter();
-      r = new HRegion(dir, log, fs, conf, info, null, null);
+      r = new HRegion(dir, log, fs, conf, info, null);
       
       // Read it back
       
@@ -156,7 +161,7 @@
       
       r.close();
       log.rollWriter();
-      r = new HRegion(dir, log, fs, conf, info, null, null);
+      r = new HRegion(dir, log, fs, conf, info, null);
 
       // Read it back
       
@@ -165,6 +170,7 @@
       // Close region once and for all
       
       r.close();
+      log.closeAndDelete();
       
     } catch(IOException e) {
       e.printStackTrace();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java Mon Jun 11 09:46:27 2007
@@ -15,6 +15,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.FileSystem;
@@ -24,74 +25,83 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.Reader;
 
+/** JUnit test case for HLog */
 public class TestHLog extends HBaseTestCase implements HConstants {
 
-  protected void setUp() throws Exception {
+  @Override
+  public void setUp() throws Exception {
     super.setUp();
   }
   
-  public void testAppend() throws Exception {
-    Path dir = getUnitTestdir(getName());
-    FileSystem fs = FileSystem.get(this.conf);
-    if (fs.exists(dir)) {
-      fs.delete(dir);
-    }
-    final int COL_COUNT = 10;
-    final Text regionName = new Text("regionname");
-    final Text tableName = new Text("tablename");
-    final Text row = new Text("row");
-    Reader reader = null;
-    HLog log = new HLog(fs, dir, this.conf);
+  /** The test */
+  public void testAppend() {
     try {
-      // Write columns named 1, 2, 3, etc. and then values of single byte
-      // 1, 2, 3...
-      TreeMap<Text, BytesWritable> cols = new TreeMap<Text, BytesWritable>();
-      for (int i = 0; i < COL_COUNT; i++) {
-        cols.put(new Text(Integer.toString(i)),
-          new BytesWritable(new byte[] { (byte)(i + '0') }));
-      }
-      long timestamp = System.currentTimeMillis();
-      log.append(regionName, tableName, row, cols, timestamp);
-      long logSeqId = log.startCacheFlush();
-      log.completeCacheFlush(regionName, tableName, logSeqId);
-      log.close();
-      Path filename = log.computeFilename(log.filenum - 1);
-      log = null;
-      // Now open a reader on the log and assert append worked.
-      reader = new SequenceFile.Reader(fs, filename, conf);
-      HLogKey key = new HLogKey();
-      HLogEdit val = new HLogEdit();
-      for (int i = 0; i < COL_COUNT; i++) {
-        reader.next(key, val);
-        assertEquals(key.getRegionName(), regionName);
-        assertEquals(key.getTablename(), tableName);
-        assertEquals(key.getRow(), row);
-        assertEquals(val.getVal().get()[0], (byte)(i + '0'));
-        System.out.println(key + " " + val);
-      }
-      while (reader.next(key, val)) {
-        // Assert only one more row... the meta flushed row.
-        assertEquals(key.getRegionName(), regionName);
-        assertEquals(key.getTablename(), tableName);
-        assertEquals(key.getRow(), HLog.METAROW);
-        assertEquals(val.getColumn(), HLog.METACOLUMN);
-        assertEquals(0, val.getVal().compareTo(COMPLETE_CACHEFLUSH));
-        System.out.println(key + " " + val);
-      }
-    } finally {
-      if (log != null) {
-        log.close();
-      }
-      if (reader != null) {
-        reader.close();
-      }
+      Path dir = getUnitTestdir(getName());
+      FileSystem fs = FileSystem.get(this.conf);
       if (fs.exists(dir)) {
         fs.delete(dir);
       }
+      final int COL_COUNT = 10;
+      final Text regionName = new Text("regionname");
+      final Text tableName = new Text("tablename");
+      final Text row = new Text("row");
+      Reader reader = null;
+      HLog log = new HLog(fs, dir, this.conf);
+      try {
+        // Write columns named 1, 2, 3, etc. and then values of single byte
+        // 1, 2, 3...
+        TreeMap<Text, BytesWritable> cols = new TreeMap<Text, BytesWritable>();
+        for (int i = 0; i < COL_COUNT; i++) {
+          cols.put(new Text(Integer.toString(i)),
+              new BytesWritable(new byte[] { (byte)(i + '0') }));
+        }
+        long timestamp = System.currentTimeMillis();
+        log.append(regionName, tableName, row, cols, timestamp);
+        long logSeqId = log.startCacheFlush();
+        log.completeCacheFlush(regionName, tableName, logSeqId);
+        log.close();
+        Path filename = log.computeFilename(log.filenum - 1);
+        log = null;
+        // Now open a reader on the log and assert append worked.
+        reader = new SequenceFile.Reader(fs, filename, conf);
+        HLogKey key = new HLogKey();
+        HLogEdit val = new HLogEdit();
+        for (int i = 0; i < COL_COUNT; i++) {
+          reader.next(key, val);
+          assertEquals(regionName, key.getRegionName());
+          assertEquals(tableName, key.getTablename());
+          assertEquals(row, key.getRow());
+          assertEquals((byte)(i + '0'), val.getVal().get()[0]);
+          System.out.println(key + " " + val);
+        }
+        while (reader.next(key, val)) {
+          // Assert only one more row... the meta flushed row.
+          assertEquals(regionName, key.getRegionName());
+          assertEquals(tableName, key.getTablename());
+          assertEquals(HLog.METAROW, key.getRow());
+          assertEquals(HLog.METACOLUMN, val.getColumn());
+          assertEquals(0, val.getVal().compareTo(COMPLETE_CACHEFLUSH));
+          System.out.println(key + " " + val);
+        }
+      } finally {
+        if (log != null) {
+          log.close();
+        }
+        if (reader != null) {
+          reader.close();
+        }
+        if (fs.exists(dir)) {
+          fs.delete(dir);
+        }
+      }
+    } catch(IOException e) {
+      e.printStackTrace();
+      fail();
     }
   }
 
-  protected void tearDown() throws Exception {
+  @Override
+  public void tearDown() throws Exception {
     super.tearDown();
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java Mon Jun 11 09:46:27 2007
@@ -29,6 +29,7 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
+/** memcache test case */
 public class TestHMemcache extends TestCase {
   
   private HMemcache hmemcache;
@@ -41,6 +42,10 @@
   
   private static final String COLUMN_FAMILY = "column";
 
+  /* (non-Javadoc)
+   * @see junit.framework.TestCase#setUp()
+   */
+  @Override
   protected void setUp() throws Exception {
     super.setUp();
 
@@ -55,6 +60,10 @@
         "org.apache.hadoop.fs.LocalFileSystem");
   }
 
+  /* (non-Javadoc)
+   * @see junit.framework.TestCase#tearDown()
+   */
+  @Override
   protected void tearDown() throws Exception {
     super.tearDown();
   }
@@ -117,24 +126,24 @@
     return s;
   }
 
+  /** 
+   * Test memcache snapshots
+   * @throws IOException
+   */
   public void testSnapshotting() throws IOException {
     final int snapshotCount = 5;
     final Text tableName = new Text(getName());
     HLog log = getLogfile();
-    try {
-      // Add some rows, run a snapshot. Do it a few times.
-      for (int i = 0; i < snapshotCount; i++) {
-        addRows(this.hmemcache);
-        Snapshot s = runSnapshot(this.hmemcache, log);
-        log.completeCacheFlush(new Text(Integer.toString(i)),
-            tableName, s.sequenceId);
-        // Clean up snapshot now we are done with it.
-        this.hmemcache.deleteSnapshot();
-      }
-      log.close();
-    } finally {
-      log.dir.getFileSystem(this.conf).delete(log.dir);
+    // Add some rows, run a snapshot. Do it a few times.
+    for (int i = 0; i < snapshotCount; i++) {
+      addRows(this.hmemcache);
+      Snapshot s = runSnapshot(this.hmemcache, log);
+      log.completeCacheFlush(new Text(Integer.toString(i)),
+          tableName, s.sequenceId);
+      // Clean up snapshot now we are done with it.
+      this.hmemcache.deleteSnapshot();
     }
+    log.closeAndDelete();
   }
   
   private void isExpectedRow(final int rowIndex,
@@ -157,7 +166,8 @@
     }
   }
 
-  public void testGetFull() throws IOException {
+  /** Test getFull from memcache */
+  public void testGetFull() {
     addRows(this.hmemcache);
     for (int i = 0; i < ROW_COUNT; i++) {
       HStoreKey hsk = new HStoreKey(getRowName(i));
@@ -166,6 +176,10 @@
     }
   }
   
+  /**
+   * Test memcache scanner
+   * @throws IOException
+   */
   public void testScanner() throws IOException {
     addRows(this.hmemcache);
     long timestamp = System.currentTimeMillis();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Mon Jun 11 09:46:27 2007
@@ -37,7 +37,7 @@
  * HRegions or in the HBaseMaster, so only basic testing is possible.
  */
 public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener {
-  private Logger LOG = Logger.getLogger(this.getClass().getName());
+  Logger LOG = Logger.getLogger(this.getClass().getName());
   
   /** Constructor */
   public TestHRegion() {
@@ -83,10 +83,9 @@
   private static FileSystem fs = null;
   private static Path parentdir = null;
   private static Path newlogdir = null;
-  private static Path oldlogfile = null;
   private static HLog log = null;
   private static HTableDescriptor desc = null;
-  private static HRegion region = null;
+  static HRegion region = null;
   
   private static int numInserted = 0;
 
@@ -99,14 +98,13 @@
     parentdir = new Path("/hbase");
     fs.mkdirs(parentdir);
     newlogdir = new Path(parentdir, "log");
-    oldlogfile = new Path(parentdir, "oldlogfile");
 
     log = new HLog(fs, newlogdir, conf);
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor("contents:"));
     desc.addFamily(new HColumnDescriptor("anchor:"));
     region = new HRegion(parentdir, log, fs, conf, 
-        new HRegionInfo(1, desc, null, null), null, oldlogfile);
+        new HRegionInfo(1, desc, null, null), null);
   }
 
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
@@ -208,6 +206,7 @@
     List<Thread>threads = new ArrayList<Thread>(threadCount);
     for (int i = 0; i < threadCount; i++) {
       threads.add(new Thread(Integer.toString(i)) {
+        @Override
         public void run() {
           long [] lockids = new long[lockCount];
           // Get locks.
@@ -822,7 +821,7 @@
     f.delete();
   }
   
-  private void cleanup() throws IOException {
+  private void cleanup() {
 
     // Shut down the mini cluster
 

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java?view=auto&rev=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestRegionServerAbort.java Mon Jun 11 09:46:27 2007
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+/** Tests region server failover when a region server aborts */
+public class TestRegionServerAbort extends HBaseClusterTestCase {
+
+  private HClient client;
+  
+  /** Constructor */
+  public TestRegionServerAbort() {
+    super(2);                                   // Start two region servers
+    client = new HClient(conf);
+  }
+  
+  /** The test */
+  public void testRegionServerAbort() {
+    try {
+      // When the META table can be opened, the region servers are running
+      
+      client.openTable(HConstants.META_TABLE_NAME);
+      
+    } catch(IOException e) {
+      e.printStackTrace();
+      fail();
+    }
+    
+    // Force a region server to exit "ungracefully"
+    
+    this.cluster.abortRegionServer(0);
+    
+    try {
+      Thread.sleep(120000);              // Wait for cluster to adjust
+      
+    } catch(InterruptedException e) {
+    }
+  }
+
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java Mon Jun 11 09:46:27 2007
@@ -132,7 +132,9 @@
     validateRegionInfo(bytes);  
   }
  
-  /** The test! */
+  /** The test!
+   * @throws IOException
+   */
   public void testScanner() throws IOException {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
@@ -152,7 +154,7 @@
       
       HLog log = new HLog(fs, new Path(regionDir, "log"), conf);
 
-      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);
       
       // Write information to the meta table
       
@@ -175,7 +177,7 @@
       
       region.close();
       log.rollWriter();
-      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);
 
       // Verify we can get the data back now that it is on disk.
       
@@ -216,7 +218,7 @@
       
       region.close();
       log.rollWriter();
-      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);
 
       // Validate again
       
@@ -252,12 +254,17 @@
       
       region.close();
       log.rollWriter();
-      region = new HRegion(dir, log, fs, conf, REGION_INFO, null, null);
+      region = new HRegion(dir, log, fs, conf, REGION_INFO, null);
 
       // Validate again
       
       scan(true, address.toString());
       getRegionInfo();
+      
+      // clean up
+      
+      region.close();
+      log.closeAndDelete();
 
     } catch(IOException e) {
       e.printStackTrace();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java?view=diff&rev=546192&r1=546191&r2=546192
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java Mon Jun 11 09:46:27 2007
@@ -1,3 +1,18 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
@@ -43,16 +58,16 @@
     List<HRegion> newRegions = new ArrayList<HRegion>(2);
     newRegions.add(HRegion.createHRegion(
       new HRegionInfo(2L, desc, null, new Text("midway")),
-      homedir, this.conf, null, null));
+      homedir, this.conf, null));
     newRegions.add(HRegion.createHRegion(
       new HRegionInfo(3L, desc, new Text("midway"), null),
-        homedir, this.conf, null, null));
+        homedir, this.conf, null));
     for (HRegion r: newRegions) {
       HRegion.addRegionToMETA(client, HConstants.META_TABLE_NAME, r,
         this.cluster.getHMasterAddress(), -1L);
     }
     regions = scan(client, HConstants.META_TABLE_NAME);
-    assertEquals("Should be two regions only", regions.size(), 2);
+    assertEquals("Should be two regions only", 2, regions.size());
   }
   
   private List<HRegionInfo> scan(final HClient client, final Text table)
@@ -68,8 +83,7 @@
           HMaster.METACOLUMNS, new Text());
       while (true) {
         TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        HStoreKey key = new HStoreKey();
-        LabelledData[] values = regionServer.next(scannerId, key);
+        KeyedData[] values = regionServer.next(scannerId);
         if (values.length == 0) {
           break;
         }
@@ -78,16 +92,15 @@
           byte[] bytes = new byte[values[i].getData().getSize()];
           System.arraycopy(values[i].getData().get(), 0, bytes, 0,
             bytes.length);
-          results.put(values[i].getLabel(), bytes);
+          results.put(values[i].getKey().getColumn(), bytes);
         }
 
         HRegionInfo info = HRegion.getRegionInfo(results);
         String serverName = HRegion.getServerName(results);
         long startCode = HRegion.getStartCode(results);
-        LOG.info(Thread.currentThread().getName() + " scanner: " +
-          Long.valueOf(scannerId) + " row: " + key +
-          ": regioninfo: {" + info.toString() + "}, server: " + serverName +
-          ", startCode: " + startCode);
+        LOG.info(Thread.currentThread().getName() + " scanner: "
+            + Long.valueOf(scannerId) + ": regioninfo: {" + info.toString()
+            + "}, server: " + serverName + ", startCode: " + startCode);
         regions.add(info);
       }
     } finally {