Posted to commits@hbase.apache.org by st...@apache.org on 2009/03/19 10:03:20 UTC

svn commit: r755878 [2/4] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/client/tableindexed/ src/java/org/apache/hadoop/hbase/client/transactional/ src/java/org/...

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/SequenceFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/SequenceFile.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/SequenceFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/SequenceFile.java Thu Mar 19 09:03:08 2009
@@ -205,7 +205,7 @@
   private static final byte BLOCK_COMPRESS_VERSION = (byte)4;
   private static final byte CUSTOM_COMPRESS_VERSION = (byte)5;
   private static final byte VERSION_WITH_METADATA = (byte)6;
-  private static byte[] VERSION = new byte[] {
+  protected static byte[] VERSION = new byte[] {
     (byte)'S', (byte)'E', (byte)'Q', VERSION_WITH_METADATA
   };
 
@@ -594,12 +594,15 @@
     /** Write compressed bytes to outStream. 
     * Note that it will NOT compress the bytes if they are not already compressed.
      * @param outStream : Stream to write compressed bytes into.
+     * @throws IllegalArgumentException 
+     * @throws IOException 
      */
     public void writeCompressedBytes(DataOutputStream outStream) 
       throws IllegalArgumentException, IOException;
 
     /**
      * Size of stored data.
+     * @return int
      */
     public int getSize();
   }
@@ -770,11 +773,13 @@
       return true;
     }
 
+    @Override
     public int hashCode() {
       assert false : "hashCode not designed";
       return 42; // any arbitrary constant will do 
     }
-    
+
+    @Override
     public String toString() {
       StringBuffer sb = new StringBuffer();
       sb.append("size: ").append(this.theMetadata.size()).append("\n");
@@ -830,14 +835,30 @@
     Writer()
     {}
     
-    /** Create the named file. */
+    /** Create the named file. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @throws IOException
+     */
     public Writer(FileSystem fs, Configuration conf, Path name, 
                   Class keyClass, Class valClass)
       throws IOException {
       this(fs, conf, name, keyClass, valClass, null, new Metadata());
     }
     
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param progress 
+     * @param metadata 
+     * @throws IOException
+     */
     public Writer(FileSystem fs, Configuration conf, Path name, 
                   Class keyClass, Class valClass,
                   Progressable progress, Metadata metadata)
@@ -848,13 +869,25 @@
            progress, metadata);
     }
     
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param bufferSize 
+     * @param replication 
+     * @param blockSize 
+     * @param progress 
+     * @param metadata 
+     * @throws IOException
+     */
     public Writer(FileSystem fs, Configuration conf, Path name,
                   Class keyClass, Class valClass,
                   int bufferSize, short replication, long blockSize,
                   Progressable progress, Metadata metadata)
       throws IOException {
-      init(name, conf,
+      init(conf,
            fs.create(name, true, bufferSize, replication, blockSize, progress),
               keyClass, valClass, false, null, metadata);
       initializeFileHeader();
@@ -863,11 +896,11 @@
     }
 
     /** Write to an arbitrary stream using a specified buffer size. */
-    private Writer(Configuration conf, FSDataOutputStream out, 
-                   Class keyClass, Class valClass, Metadata metadata)
+    protected Writer(Configuration conf, FSDataOutputStream out, 
+                  Class keyClass, Class valClass, Metadata metadata)
       throws IOException {
       this.ownOutputStream = false;
-      init(null, conf, out, keyClass, valClass, false, null, metadata);
+      init(conf, out, keyClass, valClass, false, null, metadata);
       
       initializeFileHeader();
       writeFileHeader();
@@ -907,7 +940,7 @@
     
     /** Initialize. */
     @SuppressWarnings("unchecked")
-    void init(Path name, Configuration conf, FSDataOutputStream out,
+    void init(Configuration conf, FSDataOutputStream out,
               Class keyClass, Class valClass,
               boolean compress, CompressionCodec codec, Metadata metadata) 
       throws IOException {
@@ -934,16 +967,24 @@
       }
     }
     
-    /** Returns the class of keys in this file. */
+    /** Returns the class of keys in this file. 
+     * @return Class
+     */
     public Class getKeyClass() { return keyClass; }
 
-    /** Returns the class of values in this file. */
+    /** Returns the class of values in this file. 
+     * @return Class
+     */
     public Class getValueClass() { return valClass; }
 
-    /** Returns the compression codec of data in this file. */
+    /** Returns the compression codec of data in this file. 
+     * @return CompressionCodec
+     */
     public CompressionCodec getCompressionCodec() { return codec; }
     
-    /** create a sync point */
+    /** create a sync point 
+     * @throws IOException
+     */
     public void sync() throws IOException {
       if (sync != null && lastSyncPos != out.getPos()) {
         out.writeInt(SYNC_ESCAPE);                // mark the start of the sync
@@ -955,7 +996,9 @@
     /** Returns the configuration of this file. */
     Configuration getConf() { return conf; }
     
-    /** Close the file. */
+    /** Close the file. 
+     * @throws IOException
+     */
     public synchronized void close() throws IOException {
       keySerializer.close();
       uncompressedValSerializer.close();
@@ -985,13 +1028,21 @@
       }
     }
 
-    /** Append a key/value pair. */
+    /** Append a key/value pair. 
+     * @param key 
+     * @param val 
+     * @throws IOException
+     */
     public synchronized void append(Writable key, Writable val)
       throws IOException {
       append((Object) key, (Object) val);
     }
 
-    /** Append a key/value pair. */
+    /** Append a key/value pair. 
+     * @param key 
+     * @param val 
+     * @throws IOException
+     */
     @SuppressWarnings("unchecked")
     public synchronized void append(Object key, Object val)
       throws IOException {
@@ -1060,14 +1111,32 @@
   /** Write key/compressed-value pairs to a sequence-format file. */
   static class RecordCompressWriter extends Writer {
     
-    /** Create the named file. */
+    /** Create the named file. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param codec 
+     * @throws IOException
+     */
     public RecordCompressWriter(FileSystem fs, Configuration conf, Path name, 
                                 Class keyClass, Class valClass, CompressionCodec codec) 
       throws IOException {
       this(conf, fs.create(name), keyClass, valClass, codec, new Metadata());
     }
     
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param codec 
+     * @param progress 
+     * @param metadata 
+     * @throws IOException
+     */
     public RecordCompressWriter(FileSystem fs, Configuration conf, Path name, 
                                 Class keyClass, Class valClass, CompressionCodec codec,
                                 Progressable progress, Metadata metadata)
@@ -1078,14 +1147,27 @@
            progress, metadata);
     }
 
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param bufferSize 
+     * @param replication 
+     * @param blockSize 
+     * @param codec 
+     * @param progress 
+     * @param metadata 
+     * @throws IOException
+     */
     public RecordCompressWriter(FileSystem fs, Configuration conf, Path name,
                                 Class keyClass, Class valClass,
                                 int bufferSize, short replication, long blockSize,
                                 CompressionCodec codec,
                                 Progressable progress, Metadata metadata)
       throws IOException {
-      super.init(name, conf,
+      super.init(conf,
                  fs.create(name, true, bufferSize, replication, blockSize, progress),
                  keyClass, valClass, true, codec, metadata);
 
@@ -1094,7 +1176,16 @@
       finalizeFileHeader();
     }
 
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param codec 
+     * @param progress 
+     * @throws IOException
+     */
     public RecordCompressWriter(FileSystem fs, Configuration conf, Path name, 
                                 Class keyClass, Class valClass, CompressionCodec codec,
                                 Progressable progress)
@@ -1103,11 +1194,11 @@
     }
     
     /** Write to an arbitrary stream using a specified buffer size. */
-    private RecordCompressWriter(Configuration conf, FSDataOutputStream out,
-                                 Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
+    protected RecordCompressWriter(Configuration conf, FSDataOutputStream out,
+                                Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
       throws IOException {
       this.ownOutputStream = false;
-      super.init(null, conf, out, keyClass, valClass, true, codec, metadata);
+      super.init(conf, out, keyClass, valClass, true, codec, metadata);
       
       initializeFileHeader();
       writeFileHeader();
@@ -1115,10 +1206,13 @@
       
     }
     
+    @Override
     boolean isCompressed() { return true; }
+    @Override
     boolean isBlockCompressed() { return false; }
 
     /** Append a key/value pair. */
+    @Override
     @SuppressWarnings("unchecked")
     public synchronized void append(Object key, Object val)
       throws IOException {
@@ -1151,6 +1245,7 @@
     }
 
     /** Append a key/value pair. */
+    @Override
     public synchronized void appendRaw(byte[] keyData, int keyOffset,
         int keyLength, ValueBytes val) throws IOException {
 
@@ -1181,7 +1276,15 @@
 
     private int compressionBlockSize;
     
-    /** Create the named file. */
+    /** Create the named file. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param codec 
+     * @throws IOException
+     */
     public BlockCompressWriter(FileSystem fs, Configuration conf, Path name, 
                                Class keyClass, Class valClass, CompressionCodec codec) 
       throws IOException {
@@ -1191,7 +1294,17 @@
            null, new Metadata());
     }
     
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param codec 
+     * @param progress 
+     * @param metadata 
+     * @throws IOException
+     */
     public BlockCompressWriter(FileSystem fs, Configuration conf, Path name, 
                                Class keyClass, Class valClass, CompressionCodec codec,
                                Progressable progress, Metadata metadata)
@@ -1202,14 +1315,27 @@
            progress, metadata);
     }
 
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param bufferSize 
+     * @param replication 
+     * @param blockSize 
+     * @param codec 
+     * @param progress 
+     * @param metadata 
+     * @throws IOException
+     */
     public BlockCompressWriter(FileSystem fs, Configuration conf, Path name,
                                Class keyClass, Class valClass,
                                int bufferSize, short replication, long blockSize,
                                CompressionCodec codec,
                                Progressable progress, Metadata metadata)
       throws IOException {
-      super.init(name, conf,
+      super.init(conf,
                  fs.create(name, true, bufferSize, replication, blockSize, progress),
                  keyClass, valClass, true, codec, metadata);
       init(conf.getInt("io.seqfile.compress.blocksize", 1000000));
@@ -1219,7 +1345,16 @@
       finalizeFileHeader();
     }
 
-    /** Create the named file with write-progress reporter. */
+    /** Create the named file with write-progress reporter. 
+     * @param fs 
+     * @param conf 
+     * @param name 
+     * @param keyClass 
+     * @param valClass 
+     * @param codec 
+     * @param progress 
+     * @throws IOException
+     */
     public BlockCompressWriter(FileSystem fs, Configuration conf, Path name, 
                                Class keyClass, Class valClass, CompressionCodec codec,
                                Progressable progress)
@@ -1228,11 +1363,11 @@
     }
     
     /** Write to an arbitrary stream using a specified buffer size. */
-    private BlockCompressWriter(Configuration conf, FSDataOutputStream out,
-                                Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
+    protected BlockCompressWriter(Configuration conf, FSDataOutputStream out,
+                               Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
       throws IOException {
       this.ownOutputStream = false;
-      super.init(null, conf, out, keyClass, valClass, true, codec, metadata);
+      super.init(conf, out, keyClass, valClass, true, codec, metadata);
       init(1000000);
       
       initializeFileHeader();
@@ -1240,7 +1375,9 @@
       finalizeFileHeader();
     }
     
+    @Override
     boolean isCompressed() { return true; }
+    @Override
     boolean isBlockCompressed() { return true; }
 
     /** Initialize */
@@ -1268,6 +1405,7 @@
     }
     
     /** Compress and flush contents to dfs */
+    @Override
     public synchronized void sync() throws IOException {
       if (noBufferedRecords > 0) {
         super.sync();
@@ -1305,6 +1443,7 @@
     }
 
     /** Append a key/value pair. */
+    @Override
     @SuppressWarnings("unchecked")
     public synchronized void append(Object key, Object val)
       throws IOException {
@@ -1337,6 +1476,7 @@
     }
     
     /** Append a key/value pair. */
+    @Override
     public synchronized void appendRaw(byte[] keyData, int keyOffset,
         int keyLength, ValueBytes val) throws IOException {
       
@@ -1420,7 +1560,12 @@
     private Deserializer keyDeserializer;
     private Deserializer valDeserializer;
 
-    /** Open the named file. */
+    /** Open the named file. 
+     * @param fs 
+     * @param file 
+     * @param conf 
+     * @throws IOException
+     */
     public Reader(FileSystem fs, Path file, Configuration conf)
       throws IOException {
       this(fs, file, conf.getInt("io.file.buffer.size", 4096), conf, false);
@@ -1577,7 +1722,9 @@
       return sf.getDeserializer(c);
     }
     
-    /** Close the file. */
+    /** Close the file. 
+     * @throws IOException
+     */
     public synchronized void close() throws IOException {
       // Return the decompressors to the pool
       CodecPool.returnDecompressor(keyLenDecompressor);
@@ -1598,12 +1745,16 @@
       in.close();
     }
 
-    /** Returns the name of the key class. */
+    /** Returns the name of the key class. 
+     * @return String
+     */
     public String getKeyClassName() {
       return keyClassName;
     }
 
-    /** Returns the class of keys in this file. */
+    /** Returns the class of keys in this file. 
+     * @return Class
+     */
     public synchronized Class<?> getKeyClass() {
       if (null == keyClass) {
         try {
@@ -1615,12 +1766,16 @@
       return keyClass;
     }
 
-    /** Returns the name of the value class. */
+    /** Returns the name of the value class. 
+     * @return String
+     */
     public String getValueClassName() {
       return valClassName;
     }
 
-    /** Returns the class of values in this file. */
+    /** Returns the class of values in this file. 
+     * @return Class
+     */
     public synchronized Class<?> getValueClass() {
       if (null == valClass) {
         try {
@@ -1632,7 +1787,9 @@
       return valClass;
     }
 
-    /** Returns true if values are compressed. */
+    /** Returns true if values are compressed. 
+     * @return true if values are compressed
+     */
     public boolean isCompressed() { return decompress; }
     
     /** Returns true if records are block-compressed. */

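For context, a minimal sketch of driving the Writer API shown above (create, append, sync, close). The path and the Text key/value types are illustrative; any Writable pair works with the append(Writable, Writable) overload:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SeqWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path name = new Path("/tmp/example.seq");   // illustrative path
        SequenceFile.Writer writer =
            new SequenceFile.Writer(fs, conf, name, Text.class, Text.class);
        try {
          writer.append(new Text("row1"), new Text("value1"));
          writer.sync();   // create a sync point, per the javadoc above
        } finally {
          writer.close();
        }
      }
    }
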
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java Thu Mar 19 09:03:08 2009
@@ -28,14 +28,14 @@
 public interface BlockCache {
   /**
    * Add block to cache.
-   * @param blockNumber Zero-based file block number.
+   * @param blockName Name of the block to cache.
    * @param buf The block contents wrapped in a ByteBuffer.
    */
   public void cacheBlock(String blockName, ByteBuffer buf);
   
   /**
    * Fetch block from cache.
-   * @param blockNumber Block number to fetch.
+   * @param blockName Name of the block to fetch.
    * @return Block or null if block is not in the cache.
    */
   public ByteBuffer getBlock(String blockName);  

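The interface is small enough that a toy implementation makes the contract concrete. A sketch only, with no eviction policy (real caches track hits, misses, and eviction, none of which appears in this interface):

    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;

    public class MapBlockCache implements BlockCache {
      private final Map<String, ByteBuffer> blocks =
          new HashMap<String, ByteBuffer>();

      public synchronized void cacheBlock(String blockName, ByteBuffer buf) {
        blocks.put(blockName, buf);
      }

      public synchronized ByteBuffer getBlock(String blockName) {
        return blocks.get(blockName);   // null signals a cache miss
      }
    }
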
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Thu Mar 19 09:03:08 2009
@@ -143,6 +143,7 @@
    */
   public final static Compression.Algorithm DEFAULT_COMPRESSION_ALGORITHM =
     Compression.Algorithm.NONE;
+  /** Default compression name: none. */
   public final static String DEFAULT_COMPRESSION =
     DEFAULT_COMPRESSION_ALGORITHM.getName();
 
@@ -228,7 +229,6 @@
      * @param blocksize
      * @param compress
      * @param comparator
-     * @param bloomfilter
     * @throws IOException
      */
@@ -268,7 +268,6 @@
      * @param blocksize
      * @param compress
      * @param c
-     * @param bloomfilter
      * @throws IOException
      */
     public Writer(final FSDataOutputStream ostream, final int blocksize,
@@ -428,6 +427,7 @@
       return this.path;
     }
 
+    @Override
     public String toString() {
       return "writer=" + this.name + ", compression=" +
         this.compressAlgo.getName();
@@ -664,6 +664,7 @@
       this.name = this.istream.toString();
     }
 
+    @Override
     public String toString() {
       return "reader=" + this.name +
           (!isFileInfoLoaded()? "":
@@ -1244,6 +1245,7 @@
       }
     }
 
+    @Override
     public String toString() {
       return "fileinfoOffset=" + fileinfoOffset +
       ", dataIndexOffset=" + dataIndexOffset +
@@ -1413,6 +1415,7 @@
       return bi;
     }
 
+    @Override
     public String toString() {
       StringBuilder sb = new StringBuilder();
       sb.append("size=" + count);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java Thu Mar 19 09:03:08 2009
@@ -57,6 +57,7 @@
    * that: k[i] < key.  Furthermore: there may be a k[i+1], such that
    * k[i] < key <= k[i+1] but there may also NOT be a k[i+1], and next() will
    * return false (EOF).
+   * @throws IOException
    */
   public boolean seekBefore(byte [] key) throws IOException;
   /**

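A sketch of seekBefore() in use, positioning a scanner on the greatest key strictly less than the given one. The getKey() accessor is an assumption about the rest of this interface, which this hunk does not show:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class SeekBeforeSketch {
      /** Returns the key immediately preceding 'key', or null if none exists. */
      static ByteBuffer keyBefore(HFileScanner scanner, byte[] key)
          throws IOException {
        if (!scanner.seekBefore(key)) {
          return null;              // no k[i] < key anywhere in the file
        }
        return scanner.getKey();    // assumed accessor for the current key
      }
    }
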
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java Thu Mar 19 09:03:08 2009
@@ -66,20 +66,20 @@
   
   public static final Log LOG =
     LogFactory.getLog("org.apache.hadoop.ipc.HBaseClass");
-  private Hashtable<ConnectionId, Connection> connections =
+  protected Hashtable<ConnectionId, Connection> connections =
     new Hashtable<ConnectionId, Connection>();
 
-  private Class<? extends Writable> valueClass;   // class of call values
-  private int counter;                            // counter for call ids
-  private AtomicBoolean running = new AtomicBoolean(true); // if client runs
-  final private Configuration conf;
-  final private int maxIdleTime; //connections will be culled if it was idle for 
+  protected Class<? extends Writable> valueClass;   // class of call values
+  protected int counter;                            // counter for call ids
+  protected AtomicBoolean running = new AtomicBoolean(true); // if client runs
+  final protected Configuration conf;
+  final protected int maxIdleTime; //connections will be culled if idle for 
                            //maxIdleTime msecs
-  final private int maxRetries; //the max. no. of retries for socket connections
-  private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
-  private int pingInterval; // how often sends ping to the server in msecs
+  final protected int maxRetries; //the max. no. of retries for socket connections
+  protected boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+  protected int pingInterval; // how often sends ping to the server in msecs
 
-  private SocketFactory socketFactory;           // how to create sockets
+  protected SocketFactory socketFactory;           // how to create sockets
   private int refCount = 1;
   
   final private static String PING_INTERVAL_NAME = "ipc.ping.interval";
@@ -187,7 +187,7 @@
     // currently active calls
     private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>();
     private AtomicLong lastActivity = new AtomicLong();// last I/O activity time
-    private AtomicBoolean shouldCloseConnection = new AtomicBoolean();  // indicate if the connection is closed
+    protected AtomicBoolean shouldCloseConnection = new AtomicBoolean();  // indicate if the connection is closed
     private IOException closeException; // close reason
 
     public Connection(InetSocketAddress address) throws IOException {
@@ -219,7 +219,7 @@
      * @param call to add
      * @return true if the call was added.
      */
-    private synchronized boolean addCall(Call call) {
+    protected synchronized boolean addCall(Call call) {
       if (shouldCloseConnection.get())
         return false;
       calls.put(call.id, call);
@@ -244,9 +244,8 @@
       private void handleTimeout(SocketTimeoutException e) throws IOException {
         if (shouldCloseConnection.get() || !running.get()) {
           throw e;
-        } else {
-          sendPing();
         }
+        sendPing();
       }
       
       /** Read a byte from the stream.
@@ -254,6 +253,7 @@
        * until a byte is read.
        * @throws IOException for any IO problem other than socket timeout
        */
+      @Override
       public int read() throws IOException {
         do {
           try {
@@ -270,6 +270,7 @@
        * 
        * @return the total number of bytes read; -1 if the connection is closed.
        */
+      @Override
       public int read(byte[] buf, int off, int len) throws IOException {
         do {
           try {
@@ -285,7 +286,7 @@
      * a header to the server and starts
      * the connection thread that waits for responses.
      */
-    private synchronized void setupIOstreams() {
+    protected synchronized void setupIOstreams() {
       if (socket != null || shouldCloseConnection.get()) {
         return;
       }
@@ -423,7 +424,7 @@
     /* Send a ping to the server if the time elapsed 
      * since last I/O activity is equal to or greater than the ping interval
      */
-    private synchronized void sendPing() throws IOException {
+    protected synchronized void sendPing() throws IOException {
       long curTime = System.currentTimeMillis();
       if ( curTime - lastActivity.get() >= pingInterval) {
         lastActivity.set(curTime);
@@ -434,6 +435,7 @@
       }
     }
 
+    @Override
     public void run() {
       if (LOG.isDebugEnabled())
         LOG.debug(getName() + ": starting, having connections " 
@@ -453,6 +455,7 @@
     /** Initiates a call by sending the parameter to the remote server.
      * Note: this is not called from the Connection thread, but by other
      * threads.
+     * @param call
      */
     public void sendParam(Call call) {
       if (shouldCloseConnection.get()) {
@@ -580,7 +583,7 @@
   /** Call implementation used for parallel calls. */
   private class ParallelCall extends Call {
     private ParallelResults results;
-    private int index;
+    protected int index;
     
     public ParallelCall(Writable param, ParallelResults results, int index) {
       super(param);
@@ -589,6 +592,7 @@
     }
 
     /** Deliver result to result collector. */
+    @Override
     protected void callComplete() {
       results.callComplete(this);
     }
@@ -596,16 +600,19 @@
 
   /** Result collector for parallel calls. */
   private static class ParallelResults {
-    private Writable[] values;
-    private int size;
-    private int count;
+    protected Writable[] values;
+    protected int size;
+    protected int count;
 
     public ParallelResults(int size) {
       this.values = new Writable[size];
       this.size = size;
     }
 
-    /** Collect a result. */
+    /**
+     * Collect a result.
+     * @param call
+     */
     public synchronized void callComplete(ParallelCall call) {
       values[call.index] = call.value;            // store the value
       count++;                                    // count it
@@ -614,8 +621,13 @@
     }
   }
 
-  /** Construct an IPC client whose values are of the given {@link Writable}
-   * class. */
+  /**
+   * Construct an IPC client whose values are of the given {@link Writable}
+   * class.
+   * @param valueClass
+   * @param conf
+   * @param factory
+   */
   public HBaseClient(Class<? extends Writable> valueClass, Configuration conf, 
       SocketFactory factory) {
     this.valueClass = valueClass;
@@ -677,15 +689,20 @@
 
   /** Make a call, passing <code>param</code>, to the IPC server running at
    * <code>address</code>, returning the value.  Throws exceptions if there are
-   * network problems or if the remote code threw an exception. */
+   * network problems or if the remote code threw an exception. 
+   * @param param 
+   * @param address 
+   * @return Writable 
+   * @throws IOException
+   */
   public Writable call(Writable param, InetSocketAddress address)
-  throws InterruptedException, IOException {
+  throws IOException {
       return call(param, address, null);
   }
   
   public Writable call(Writable param, InetSocketAddress addr, 
                        UserGroupInformation ticket)  
-                       throws InterruptedException, IOException {
+                       throws IOException {
     Call call = new Call(param);
     Connection connection = getConnection(addr, ticket, call);
     connection.sendParam(call);                 // send the parameter
@@ -700,12 +717,11 @@
         if (call.error instanceof RemoteException) {
           call.error.fillInStackTrace();
           throw call.error;
-        } else { // local exception
-          throw wrapException(addr, call.error);
         }
-      } else {
-        return call.value;
+        // local exception
+        throw wrapException(addr, call.error);
       }
+      return call.value;
     }
   }
 
@@ -743,7 +759,12 @@
   /** Makes a set of calls in parallel.  Each parameter is sent to the
    * corresponding address.  When all values are available, or have timed out
    * or errored, the collected results are returned in an array.  The array
-   * contains nulls for calls that timed out or errored.  */
+   * contains nulls for calls that timed out or errored.  
+   * @param params 
+   * @param addresses 
+   * @return  Writable[]
+   * @throws IOException
+   */
   public Writable[] call(Writable[] params, InetSocketAddress[] addresses)
     throws IOException {
     if (addresses.length == 0) return new Writable[0];

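The handleTimeout() rewrite above keeps the ping-on-timeout idiom intact: reads block with a socket timeout, and each timeout sends a ping rather than failing, for as long as the connection should stay open. The same idiom in isolation (a sketch; names here are illustrative, not the HBase ones):

    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.net.SocketTimeoutException;

    /** Illustrative stand-in for the ping-aware input stream above. */
    class PingingInputStream extends FilterInputStream {
      private final Runnable pinger;            // sends a ping to the server
      private volatile boolean shouldClose;     // mirrors shouldCloseConnection

      PingingInputStream(InputStream in, Runnable pinger) {
        super(in);
        this.pinger = pinger;
      }

      @Override
      public int read() throws IOException {
        do {
          try {
            return super.read();
          } catch (SocketTimeoutException e) {
            if (shouldClose) {
              throw e;             // give up: the connection is going away
            }
            pinger.run();          // still wanted: ping and retry the read
          }
        } while (true);
      }
    }
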
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java Thu Mar 19 09:03:08 2009
@@ -78,7 +78,7 @@
   // Leave this out in the hadoop ipc package but keep class name.  Do this
  // so that we don't get the logging of this class's invocations by doing our
   // blanket enabling DEBUG on the o.a.h.h. package.
-  private static final Log LOG =
+  protected static final Log LOG =
     LogFactory.getLog("org.apache.hadoop.ipc.HbaseRPC");
 
   private HBaseRPC() {
@@ -236,6 +236,8 @@
     private Map<SocketFactory, HBaseClient> clients =
       new HashMap<SocketFactory, HBaseClient>();
 
+    protected ClientCache() {}
+
     /**
      * Construct & cache an IPC client with the user-provided SocketFactory 
      * if no cached client exists.
@@ -243,7 +245,7 @@
      * @param conf Configuration
      * @return an IPC client
      */
-    private synchronized HBaseClient getClient(Configuration conf,
+    protected synchronized HBaseClient getClient(Configuration conf,
         SocketFactory factory) {
       // Construct & cache client.  The configuration is only used for timeout,
       // and Clients have connection pools.  So we can either (a) lose some
@@ -256,7 +258,7 @@
         client = new HBaseClient(HbaseObjectWritable.class, conf, factory);
         clients.put(factory, client);
       } else {
-        ((HBaseClient)client).incCount();
+        client.incCount();
       }
       return client;
     }
@@ -268,7 +270,7 @@
      * @param conf Configuration
      * @return an IPC client
      */
-    private synchronized HBaseClient getClient(Configuration conf) {
+    protected synchronized HBaseClient getClient(Configuration conf) {
       return getClient(conf, SocketFactory.getDefault());
     }
 
@@ -276,20 +278,20 @@
      * Stop a RPC client connection 
      * A RPC client is closed only when its reference count becomes zero.
      */
-    private void stopClient(HBaseClient client) {
+    protected void stopClient(HBaseClient client) {
       synchronized (this) {
-        ((HBaseClient)client).decCount();
-        if (((HBaseClient)client).isZeroReference()) {
-          clients.remove(((HBaseClient)client).getSocketFactory());
+        client.decCount();
+        if (client.isZeroReference()) {
+          clients.remove(client.getSocketFactory());
         }
       }
-      if (((HBaseClient)client).isZeroReference()) {
+      if (client.isZeroReference()) {
         client.stop();
       }
     }
   }
 
-  private static ClientCache CLIENTS = new ClientCache();
+  protected final static ClientCache CLIENTS = new ClientCache();
   
   private static class Invoker implements InvocationHandler {
     private InetSocketAddress address;
@@ -310,9 +312,8 @@
       this.client = CLIENTS.getClient(conf, factory);
     }
 
-    public Object invoke(@SuppressWarnings("unused") Object proxy,
-        Method method, Object[] args)
-      throws Throwable {
+    public Object invoke(Object proxy, Method method, Object[] args)
+        throws Throwable {
       final boolean logDebug = LOG.isDebugEnabled();
       long startTime = 0;
       if (logDebug) {
@@ -328,7 +329,7 @@
     }
     
     /* close the IPC client that's responsible for this invoker's RPCs */ 
-    synchronized private void close() {
+    synchronized protected void close() {
       if (!isClosed) {
         isClosed = true;
         CLIENTS.stopClient(client);
@@ -468,10 +469,9 @@
                                                   clientVersion);
     if (serverVersion == clientVersion) {
       return proxy;
-    } else {
-      throw new VersionMismatch(protocol.getName(), clientVersion, 
-                                serverVersion);
     }
+    throw new VersionMismatch(protocol.getName(), clientVersion, 
+                              serverVersion);
   }
 
   /**
@@ -657,11 +657,10 @@
         Throwable target = e.getTargetException();
         if (target instanceof IOException) {
           throw (IOException)target;
-        } else {
-          IOException ioe = new IOException(target.toString());
-          ioe.setStackTrace(target.getStackTrace());
-          throw ioe;
         }
+        IOException ioe = new IOException(target.toString());
+        ioe.setStackTrace(target.getStackTrace());
+        throw ioe;
       } catch (Throwable e) {
         IOException ioe = new IOException(e.toString());
         ioe.setStackTrace(e.getStackTrace());
@@ -670,9 +669,10 @@
     }
   }
 
-  private static void log(String value) {
-    if (value!= null && value.length() > 55)
-      value = value.substring(0, 55)+"...";
-    LOG.info(value);
+  protected static void log(String value) {
+    String v = value;
+    if (v != null && v.length() > 55)
+      v = v.substring(0, 55)+"...";
+    LOG.info(v);
   }
 }

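The ClientCache edits drop the redundant (HBaseClient) casts but preserve the reference-counting protocol: getClient() creates a client holding one reference or bumps the count on a cached one, and stopClient() decrements and only stops a client once its count reaches zero, doing the stop outside the cache lock. A self-contained sketch of that protocol (Client here is a stand-in, not the real HBaseClient):

    import java.util.HashMap;
    import java.util.Map;

    class RefCountedCache<K> {
      /** Stand-in for HBaseClient's reference-counting surface. */
      static class Client {
        private int refCount = 1;                 // creator holds one reference
        synchronized void incCount() { refCount++; }
        synchronized void decCount() { refCount--; }
        synchronized boolean isZeroReference() { return refCount == 0; }
        void stop() { /* release sockets, threads, buffers ... */ }
      }

      private final Map<K, Client> clients = new HashMap<K, Client>();

      synchronized Client getClient(K key) {
        Client client = clients.get(key);
        if (client == null) {
          client = new Client();
          clients.put(key, client);
        } else {
          client.incCount();
        }
        return client;
      }

      void stopClient(Client client, K key) {
        synchronized (this) {
          client.decCount();
          if (client.isZeroReference()) {
            clients.remove(key);
          }
        }
        if (client.isZeroReference()) {
          client.stop();                          // stop outside the cache lock
        }
      }
    }
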
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java Thu Mar 19 09:03:08 2009
@@ -47,7 +47,7 @@
   private MetricsRecord metricsRecord;
   private static Log LOG = LogFactory.getLog(HBaseRpcMetrics.class);
   
-  public HBaseRpcMetrics(String hostName, String port, HBaseServer server) {
+  public HBaseRpcMetrics(String hostName, String port) {
     MetricsContext context = MetricsUtil.getContext("rpc");
     metricsRecord = MetricsUtil.createRecord(context, "metrics");
 
@@ -75,6 +75,7 @@
   
   /**
    * Push the metrics to the monitoring subsystem on doUpdate() call.
+   * @param context
    */
   public void doUpdates(MetricsContext context) {
     rpcQueueTime.pushMetric(metricsRecord);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java Thu Mar 19 09:03:08 2009
@@ -86,12 +86,14 @@
   public static final Log LOG =
     LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer");
 
-  private static final ThreadLocal<HBaseServer> SERVER = new ThreadLocal<HBaseServer>();
+  protected static final ThreadLocal<HBaseServer> SERVER = new ThreadLocal<HBaseServer>();
 
   /** Returns the server instance called under or null.  May be called under
    * {@link #call(Writable, long)} implementations, and under {@link Writable}
   * methods of parameters and return values.  Permits applications to access
-   * the server context.*/
+   * the server context.
+   * @return HBaseServer
+   */
   public static HBaseServer get() {
     return SERVER.get();
   }
@@ -99,10 +101,11 @@
   /** This is set to Call object before Handler invokes an RPC and reset
    * after the call returns.
    */
-  private static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>();
+  protected static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>();
   
   /** Returns the remote side ip address when invoked inside an RPC 
   *  Returns null in case of an error.
+   *  @return InetAddress
    */
   public static InetAddress getRemoteIp() {
     Call call = CurCall.get();
@@ -113,46 +116,47 @@
   }
   /** Returns remote address as a string when invoked inside an RPC.
    *  Returns null in case of an error.
+   *  @return String
    */
   public static String getRemoteAddress() {
     InetAddress addr = getRemoteIp();
     return (addr == null) ? null : addr.getHostAddress();
   }
 
-  private String bindAddress; 
-  private int port;                               // port we listen on
+  protected String bindAddress; 
+  protected int port;                             // port we listen on
   private int handlerCount;                       // number of handler threads
-  private Class<? extends Writable> paramClass;   // class of call parameters
-  private int maxIdleTime;                        // the maximum idle time after 
+  protected Class<? extends Writable> paramClass; // class of call parameters
+  protected int maxIdleTime;                      // the maximum idle time after 
                                                   // which a client may be disconnected
-  private int thresholdIdleConnections;           // the number of idle connections
+  protected int thresholdIdleConnections;         // the number of idle connections
                                                   // after which we will start
                                                   // cleaning up idle 
                                                   // connections
   int maxConnectionsToNuke;                       // the max number of 
                                                   // connections to nuke
-                                                  //during a cleanup
+                                                  // during a cleanup
   
   protected HBaseRpcMetrics  rpcMetrics;
   
-  private Configuration conf;
+  protected Configuration conf;
 
   private int maxQueueSize;
-  private int socketSendBufferSize;
-  private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+  protected int socketSendBufferSize;
+  protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 
-  volatile private boolean running = true;         // true while server runs
-  private BlockingQueue<Call> callQueue; // queued calls
+  volatile protected boolean running = true;         // true while server runs
+  protected BlockingQueue<Call> callQueue; // queued calls
 
-  private List<Connection> connectionList = 
+  protected List<Connection> connectionList = 
     Collections.synchronizedList(new LinkedList<Connection>());
   //maintain a list
   //of client connections
   private Listener listener = null;
-  private Responder responder = null;
-  private int numConnections = 0;
+  protected Responder responder = null;
+  protected int numConnections = 0;
   private Handler[] handlers = null;
-  private HBaseRPCErrorHandler errorHandler = null;
+  protected HBaseRPCErrorHandler errorHandler = null;
 
   /**
    * A convenience method to bind to a given address and report 
@@ -179,20 +183,19 @@
       if ("Unresolved address".equals(e.getMessage())) {
         throw new UnknownHostException("Invalid hostname for server: " + 
                                        address.getHostName());
-      } else {
-        throw e;
       }
+      throw e;
     }
   }
 
   /** A call queued for handling. */
   private static class Call {
-    private int id;                               // the client's call id
-    private Writable param;                       // the parameter passed
-    private Connection connection;                // connection to client
-    private long timestamp;     // the time received when response is null
+    protected int id;                             // the client's call id
+    protected Writable param;                     // the parameter passed
+    protected Connection connection;              // connection to client
+    protected long timestamp;      // the time received when response is null
                                    // the time served when response is not null
-    private ByteBuffer response;                      // the response for this call
+    protected ByteBuffer response;                // the response for this call
 
     public Call(int id, Writable param, Connection connection) {
       this.id = id;
@@ -317,7 +320,7 @@
           if (errorHandler != null) {
             if (errorHandler.checkOOME(e)) {
               LOG.info(getName() + ": exiting on OOME");
-              closeCurrentConnection(key, e);
+              closeCurrentConnection(key);
               cleanupConnections(true);
               return;
             }
@@ -326,7 +329,7 @@
             // log the event and sleep for a minute and give 
             // some thread(s) a chance to finish
             LOG.warn("Out of Memory in server select", e);
-            closeCurrentConnection(key, e);
+            closeCurrentConnection(key);
             cleanupConnections(true);
             try { Thread.sleep(60000); } catch (Exception ie) {}
       }
@@ -336,7 +339,7 @@
                      StringUtils.stringifyException(e));
           }
         } catch (Exception e) {
-          closeCurrentConnection(key, e);
+          closeCurrentConnection(key);
         }
         cleanupConnections(false);
       }
@@ -358,7 +361,7 @@
       }
     }
 
-    private void closeCurrentConnection(SelectionKey key, Throwable e) {
+    private void closeCurrentConnection(SelectionKey key) {
       if (key != null) {
         Connection c = (Connection)key.attachment();
         if (c != null) {
@@ -385,7 +388,7 @@
         channel.configureBlocking(false);
         channel.socket().setTcpNoDelay(tcpNoDelay);
         SelectionKey readKey = channel.register(selector, SelectionKey.OP_READ);
-        c = new Connection(readKey, channel, System.currentTimeMillis());
+        c = new Connection(channel, System.currentTimeMillis());
         readKey.attach(c);
         synchronized (connectionList) {
           connectionList.add(numConnections, c);
@@ -504,11 +507,7 @@
           }
           
           for(Call call : calls) {
-            try {
-              doPurge(call, now);
-            } catch (IOException e) {
-              LOG.warn("Error in purging old calls " + e);
-            }
+            doPurge(call, now);
           }
         } catch (OutOfMemoryError e) {
           if (errorHandler != null) {
@@ -562,14 +561,14 @@
     // Remove calls that have been pending in the responseQueue 
     // for a long time.
     //
-    private void doPurge(Call call, long now) throws IOException {
+    private void doPurge(Call call, long now) {
       LinkedList<Call> responseQueue = call.connection.responseQueue;
       synchronized (responseQueue) {
         Iterator<Call> iter = responseQueue.listIterator(0);
         while (iter.hasNext()) {
-          call = iter.next();
-          if (now > call.timestamp + PURGE_INTERVAL) {
-            closeConnection(call.connection);
+          Call nextCall = iter.next();
+          if (now > nextCall.timestamp + PURGE_INTERVAL) {
+            closeConnection(nextCall.connection);
             break;
           }
         }
@@ -698,22 +697,21 @@
                                          //version are read
     private boolean headerRead = false;  //if the connection header that
                                          //follows version is read.
-    private SocketChannel channel;
+    protected SocketChannel channel;
     private ByteBuffer data;
     private ByteBuffer dataLengthBuffer;
-    private LinkedList<Call> responseQueue;
+    protected LinkedList<Call> responseQueue;
     private volatile int rpcCount = 0; // number of outstanding rpcs
     private long lastContact;
     private int dataLength;
-    private Socket socket;
+    protected Socket socket;
     // Cache the remote host & port info so that even if the socket is 
     // disconnected, we can say where it used to connect to.
     private String hostAddress;
     private int remotePort;
-    private UserGroupInformation ticket = null;
+    protected UserGroupInformation ticket = null;
 
-    public Connection(SelectionKey key, SocketChannel channel, 
-                      long lastContact) {
+    public Connection(SocketChannel channel, long lastContact) {
       this.channel = channel;
       this.lastContact = lastContact;
       this.data = null;
@@ -760,7 +758,7 @@
     }
     
     /* Decrement the outstanding RPC count */
-    private void decRpcCount() {
+    protected void decRpcCount() {
       rpcCount--;
     }
     
@@ -769,7 +767,7 @@
       rpcCount++;
     }
     
-    private boolean timedOut(long currentTime) {
+    protected boolean timedOut(long currentTime) {
       if (isIdle() && currentTime -  lastContact > maxIdleTime)
         return true;
       return false;
@@ -831,12 +829,11 @@
             processData();
             data = null;
             return count;
-          } else {
-            processHeader();
-            headerRead = true;
-            data = null;
-            continue;
           }
+          processHeader();
+          headerRead = true;
+          data = null;
+          continue;
         } 
         return count;
       }
@@ -867,7 +864,7 @@
       callQueue.put(call);              // queue the call; maybe blocked here
     }
 
-    private synchronized void close() throws IOException {
+    protected synchronized void close() {
       data = null;
       dataLengthBuffer = null;
       if (!channel.isOpen())
@@ -995,30 +992,28 @@
     listener = new Listener();
     this.port = listener.getAddress().getPort();    
     this.rpcMetrics = new HBaseRpcMetrics(serverName,
-                          Integer.toString(this.port), this);
+                          Integer.toString(this.port));
     this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);
 
-
     // Create the responder here
     responder = new Responder();
   }
 
-  private void closeConnection(Connection connection) {
+  protected void closeConnection(Connection connection) {
     synchronized (connectionList) {
       if (connectionList.remove(connection))
         numConnections--;
     }
-    try {
-      connection.close();
-    } catch (IOException e) {
-    }
+    connection.close();
   }
   
-  /** Sets the socket buffer size used for responding to RPCs */
+  /** Sets the socket buffer size used for responding to RPCs.
+   * @param size
+   */
   public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
 
   /** Starts the service.  Must be called before any calls will be handled. */
-  public synchronized void start() throws IOException {
+  public synchronized void start() {
     responder.start();
     listener.start();
     handlers = new Handler[handlerCount];
@@ -1052,6 +1047,7 @@
   /** Wait for the server to be stopped.
    * Does not wait for all subthreads to finish.
    *  See {@link #stop()}.
+   * @throws InterruptedException
    */
   public synchronized void join() throws InterruptedException {
     while (running) {
@@ -1067,11 +1063,15 @@
     return listener.getAddress();
   }
   
-  /** Called for each call. */
+  /** Called for each call. 
+   * @param param 
+   * @param receiveTime 
+   * @return Writable 
+   * @throws IOException
+   */
   public abstract Writable call(Writable param, long receiveTime)
                                                 throws IOException;
-  
-  
+
   /**
   * The number of open RPC connections
    * @return the number of open rpc connections
@@ -1113,14 +1113,12 @@
    *
    * @see WritableByteChannel#write(ByteBuffer)
    */
-  private static int channelWrite(WritableByteChannel channel, 
-                                  ByteBuffer buffer) throws IOException {
-    
+  protected static int channelWrite(WritableByteChannel channel, 
+                                    ByteBuffer buffer) throws IOException {
     return (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
            channel.write(buffer) : channelIO(null, channel, buffer);
   }
-  
-  
+
   /**
    * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}.
    * If the amount of data is large, it writes to channel in smaller chunks. 
@@ -1129,13 +1127,12 @@
    * 
    * @see ReadableByteChannel#read(ByteBuffer)
    */
-  private static int channelRead(ReadableByteChannel channel, 
-                                 ByteBuffer buffer) throws IOException {
-    
+  protected static int channelRead(ReadableByteChannel channel, 
+                                   ByteBuffer buffer) throws IOException {
     return (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
            channel.read(buffer) : channelIO(channel, null, buffer);
   }
-  
+
   /**
    * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)}
    * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only

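channelWrite() and channelRead() above delegate to channelIO() once a buffer exceeds NIO_BUFFER_LIMIT, so that large responses move through the channel in bounded slices instead of one oversized call. The slicing loop, sketched on the write side (the limit constant is illustrative):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.WritableByteChannel;

    class ChunkedWriteSketch {
      private static final int LIMIT = 8 * 1024;   // illustrative chunk size

      /** Write 'buffer' through 'channel' in at most LIMIT-sized slices. */
      static int write(WritableByteChannel channel, ByteBuffer buffer)
          throws IOException {
        int originalLimit = buffer.limit();
        int written = 0;
        while (buffer.remaining() > 0) {
          int chunk = Math.min(buffer.remaining(), LIMIT);
          buffer.limit(buffer.position() + chunk);   // expose one slice
          int n = channel.write(buffer);             // position advances by n
          buffer.limit(originalLimit);               // restore the full view
          written += n;
          if (n < chunk) {
            break;      // channel is backed up; caller can retry later
          }
        }
        return written;
      }
    }
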
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java Thu Mar 19 09:03:08 2009
@@ -86,6 +86,11 @@
    * 
    * Pass the new key and value to reduce.
    * If any of the grouping columns are not found in the value, the record is skipped.
+   * @param key 
+   * @param value 
+   * @param output 
+   * @param reporter 
+   * @throws IOException 
    */
   public void map(ImmutableBytesWritable key, RowResult value, 
       OutputCollector<ImmutableBytesWritable,RowResult> output,

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java Thu Mar 19 09:03:08 2009
@@ -59,6 +59,11 @@
 
   /**
    * Pass the key, value to reduce
+   * @param key 
+   * @param value 
+   * @param output 
+   * @param reporter 
+   * @throws IOException 
    */
   public void map(ImmutableBytesWritable key, RowResult value,
       OutputCollector<ImmutableBytesWritable,RowResult> output,

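The body behind that javadoc is, in essence, a one-line pass-through; sketched here with the surrounding class elided:

    // Inside IdentityTableMap: forward each (key, value) pair unchanged.
    public void map(ImmutableBytesWritable key, RowResult value,
        OutputCollector<ImmutableBytesWritable, RowResult> output,
        Reporter reporter) throws IOException {
      output.collect(key, value);
    }
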
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java Thu Mar 19 09:03:08 2009
@@ -42,6 +42,11 @@
   
   /**
    * No aggregation, output pairs of (key, record)
+   * @param key 
+   * @param values 
+   * @param output 
+   * @param reporter 
+   * @throws IOException 
    */
   public void reduce(ImmutableBytesWritable key, Iterator<BatchUpdate> values,
       OutputCollector<ImmutableBytesWritable, BatchUpdate> output,

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java Thu Mar 19 09:03:08 2009
@@ -45,6 +45,8 @@
     FileOutputFormat<ImmutableBytesWritable, LuceneDocumentWrapper> {
   static final Log LOG = LogFactory.getLog(IndexOutputFormat.class);
 
+  private Random random = new Random();
+
   @Override
   public RecordWriter<ImmutableBytesWritable, LuceneDocumentWrapper>
   getRecordWriter(final FileSystem fs, JobConf job, String name,
@@ -53,7 +55,7 @@
 
     final Path perm = new Path(FileOutputFormat.getOutputPath(job), name);
     final Path temp = job.getLocalPath("index/_"
-        + Integer.toString(new Random().nextInt()));
+        + Integer.toString(random.nextInt()));
 
     LOG.info("To index into " + perm);
 

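The IndexOutputFormat change hoists the Random into an instance field, so repeated getRecordWriter() calls reuse one generator rather than constructing a fresh one per temp path. The pattern in isolation (a sketch; the class and method names are illustrative):

    import java.util.Random;

    class TempPathNamer {
      // One generator per instance, as in the change above, instead of
      // "new Random()" inside every call.
      private final Random random = new Random();

      String tempIndexPath() {
        return "index/_" + Integer.toString(random.nextInt());
      }
    }
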
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java Thu Mar 19 09:03:08 2009
@@ -82,7 +82,6 @@
    * @return the JobConf
    * @throws IOException
    */
-  @SuppressWarnings("unused")
   public JobConf createSubmittableJob(String[] args) throws IOException {
     JobConf c = new JobConf(getConf(), RowCounter.class);
     c.setJobName(NAME);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java Thu Mar 19 09:03:08 2009
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java Thu Mar 19 09:03:08 2009
@@ -50,7 +50,7 @@
    * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) 
    * and write to an HBase table
    */
-  protected class TableRecordWriter
+  protected static class TableRecordWriter
     implements RecordWriter<ImmutableBytesWritable, BatchUpdate> {
     private HTable m_table;
 

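Making TableRecordWriter a static nested class drops the hidden reference every inner-class instance keeps to its enclosing object; the writer only uses its own HTable field, so the outer pointer was dead weight (and a potential leak if a writer outlives the format). A minimal sketch of the difference, with illustrative names:

    public class Outer {
      class Inner {}          // carries an implicit Outer.this reference
      static class Nested {}  // stands alone; no outer instance retained

      public static void main(String[] args) {
        Nested n = new Nested();            // no Outer needed at all
        Inner i = new Outer().new Inner();  // must route through an instance
        System.out.println(n.getClass() + " / " + i.getClass());
      }
    }

The same cleanup appears below for ProcessServerShutdown.ToDoEntry, Store.StoreSize, and StoreFileScanner.ViableRow.
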
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java Thu Mar 19 09:03:08 2009
@@ -110,7 +110,7 @@
   
   // will use this variable to synchronize and make sure we aren't interrupted 
   // mid-scan
-  final Integer scannerLock = new Integer(0);
+  final Object scannerLock = new Object();
   
   BaseScanner(final HMaster master, final boolean rootRegion, final int period,
       final AtomicBoolean stop) {

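Swapping the new Integer(0) lock for a plain Object avoids synchronizing on a boxed primitive. Boxed Integers in -128..127 are interned by Integer.valueOf (and by autoboxing), so unrelated code that locks on a small boxed value can end up sharing one monitor; a bare Object is guaranteed unique. A sketch of the hazard:

    public class LockDemo {
      public static void main(String[] args) {
        // The JLS requires valueOf to cache -128..127, so these two
        // "independent" locks would be the very same object:
        System.out.println(Integer.valueOf(0) == Integer.valueOf(0)); // true

        // Plain Objects never alias; each one is its own monitor:
        Object lockA = new Object();
        Object lockB = new Object();
        System.out.println(lockA == lockB); // false
      }
    }

The same substitution is applied below in HLog, HRegion, and Store.
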
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java Thu Mar 19 09:03:08 2009
@@ -38,8 +38,7 @@
   }
 
   @Override
-  protected void processScanItem(@SuppressWarnings("unused") String serverName,
-      final HRegionInfo info)
+  protected void processScanItem(String serverName, final HRegionInfo info)
   throws IOException {
     if (isEnabled(info)) {
       throw new TableNotDisabledException(tableName);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Thu Mar 19 09:03:08 2009
@@ -96,8 +96,7 @@
 
   static final Log LOG = LogFactory.getLog(HMaster.class.getName());
 
-  public long getProtocolVersion(@SuppressWarnings("unused") String protocol,
-      @SuppressWarnings("unused") long clientVersion) {
+  public long getProtocolVersion(String protocol, long clientVersion) {
     return HBaseRPCProtocolVersion.versionID;
   }
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java Thu Mar 19 09:03:08 2009
@@ -19,6 +19,8 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.util.Arrays;
+
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -76,8 +78,8 @@
 
   @Override
   public int hashCode() {
-    int result = this.regionName.hashCode();
-    result ^= this.startKey.hashCode();
+    int result = Arrays.hashCode(this.regionName);
+    result ^= Arrays.hashCode(this.startKey);
     return result;
   }
 

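regionName and startKey are byte arrays, and arrays inherit Object's identity hashCode, so two MetaRegions naming the same region would have hashed differently; Arrays.hashCode folds in the contents. A short demonstration:

    import java.util.Arrays;

    public class ByteArrayHashDemo {
      public static void main(String[] args) {
        byte[] a = "regionA".getBytes();
        byte[] b = "regionA".getBytes();
        // Identity-based: equal contents, (almost certainly) unequal hashes.
        System.out.println(a.hashCode() == b.hashCode());
        // Content-based: stable across copies, as the hashCode contract needs.
        System.out.println(Arrays.hashCode(a) == Arrays.hashCode(b)); // true
      }
    }

HLogKey gets the identical fix further down.
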
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java Thu Mar 19 09:03:08 2009
@@ -57,10 +57,10 @@
   }
 
   @Override
-  protected void processScanItem(@SuppressWarnings("unused") String serverName,
+  protected void processScanItem(String serverName,
       final HRegionInfo info) throws IOException {
     if (isEnabled(info)) {
-      throw new TableNotDisabledException(tableName.toString());
+      throw new TableNotDisabledException(Bytes.toString(tableName));
     }
   }
 

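tableName here is a byte[], so the old tableName.toString() rendered as something like "[B@7a84e4" in the exception message; Bytes.toString decodes the UTF-8 contents back to the table name. A sketch using the HBase Bytes utility these classes already import:

    import org.apache.hadoop.hbase.util.Bytes;

    public class ByteArrayLogDemo {
      public static void main(String[] args) {
        byte[] tableName = Bytes.toBytes("mytable");
        // Array toString() is type tag plus identity hash -- useless in logs:
        System.out.println("bad:  " + tableName);
        // Decoding the bytes yields the human-readable name:
        System.out.println("good: " + Bytes.toString(tableName)); // mytable
      }
    }

The same Bytes.toString treatment recurs throughout this commit wherever a region or table name was being logged raw.
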
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java Thu Mar 19 09:03:08 2009
@@ -42,7 +42,6 @@
    * @param regionInfo
    * @throws IOException
    */
-  @SuppressWarnings("unused")
   public ProcessRegionOpen(HMaster master, HServerInfo info,
       HRegionInfo regionInfo)
   throws IOException {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java Thu Mar 19 09:03:08 2009
@@ -51,7 +51,7 @@
   private boolean rootRescanned;
   
 
-  private class ToDoEntry {
+  private static class ToDoEntry {
     boolean regionOffline;
     final byte [] row;
     final HRegionInfo info;

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java Thu Mar 19 09:03:08 2009
@@ -1315,6 +1315,12 @@
     
     @Override
     public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
       return this.compareTo((RegionState) o) == 0;
     }
     

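The added guards bring equals() in line with its contract: without them, equals(null) or equals of a foreign type would throw from the (RegionState) cast instead of returning false. A minimal sketch of the guarded pattern, using an illustrative stand-in class:

    public class StateLike implements Comparable<StateLike> {
      private final String name;
      StateLike(String name) { this.name = name; }

      public int compareTo(StateLike o) { return name.compareTo(o.name); }

      @Override
      public boolean equals(Object o) {
        if (this == o) return true;                 // reflexive fast path
        if (o == null || getClass() != o.getClass()) return false;
        return compareTo((StateLike) o) == 0;       // cast is now safe
      }

      @Override
      public int hashCode() { return name.hashCode(); } // stays consistent

      public static void main(String[] args) {
        StateLike s = new StateLike("r1");
        System.out.println(s.equals(null));                // false, no NPE
        System.out.println(s.equals(new StateLike("r1"))); // true
      }
    }

HLogKey.equals below receives the same two guards.
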
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java Thu Mar 19 09:03:08 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Sleeper;
 
 /**
@@ -75,8 +76,8 @@
           if (LOG.isDebugEnabled()) {
             StringBuilder message = new StringBuilder(
                 "Trying to contact region server for regionName '" + 
-                m.getRegionName() + "', but failed after " + (tries + 1)  + 
-                " attempts.\n");
+                Bytes.toString(m.getRegionName()) + "', but failed after " +
+                (tries + 1) + " attempts.\n");
             int i = 1;
             for (IOException e2 : exceptions) {
               message.append("Exception " + i + ":\n" + e2);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java Thu Mar 19 09:03:08 2009
@@ -42,6 +42,7 @@
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.Leases;
 import org.apache.hadoop.hbase.HMsg.Type;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -76,7 +77,7 @@
   * and its server logs are recovered, it will be told to call server startup
    * because by then, its regions have probably been reassigned.
    */
-  private final Set<String> deadServers =
+  protected final Set<String> deadServers =
     Collections.synchronizedSet(new HashSet<String>());
 
   /** SortedMap server load -> Set of server names */
@@ -87,7 +88,7 @@
   final Map<String, HServerLoad> serversToLoad =
     new ConcurrentHashMap<String, HServerLoad>();  
 
-  private HMaster master;
+  protected HMaster master;
   
   // Last time we logged average load.
   private volatile long lastLogOfAverageLaod = 0;
@@ -490,7 +491,7 @@
       if (duplicateAssignment) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("region server " + serverInfo.getServerAddress().toString()
-              + " should not have opened region " + region.getRegionName());
+              + " should not have opened region " + Bytes.toString(region.getRegionName()));
         }
 
         // This Region should not have been opened.

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java Thu Mar 19 09:03:08 2009
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /** 
  * Instantiated to delete a table. Table must be offline.
@@ -41,7 +42,7 @@
   }
 
   @Override
-  protected void processScanItem(@SuppressWarnings("unused") String serverName,
+  protected void processScanItem(String serverName,
       final HRegionInfo info) throws IOException {
     
     if (isEnabled(info)) {
@@ -59,12 +60,12 @@
         HRegion.deleteRegion(this.master.fs, this.master.rootdir, i);
       
       } catch (IOException e) {
-        LOG.error("failed to delete region " + i.getRegionName(),
+        LOG.error("failed to delete region " + Bytes.toString(i.getRegionName()),
           RemoteExceptionHandler.checkIOException(e));
       }
     }
     
     // delete the table's folder from fs.
-    master.fs.delete(new Path(master.rootdir, tableName.toString()), true);
+    master.fs.delete(new Path(master.rootdir, Bytes.toString(tableName)), true);
   }
 }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java Thu Mar 19 09:03:08 2009
@@ -89,7 +89,8 @@
           HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values);
           if (info == null) {
             emptyRows.add(values.getRow());
-            LOG.error(COL_REGIONINFO + " not found on " + values.getRow());
+            LOG.error(Bytes.toString(COL_REGIONINFO) + " not found on " +
+                      Bytes.toString(values.getRow()));
             continue;
           }
           String serverAddress = Writables.cellToString(values.get(COL_SERVER));
@@ -125,7 +126,7 @@
       if (emptyRows.size() > 0) {
         LOG.warn("Found " + emptyRows.size() +
             " rows with empty HRegionInfo while scanning meta region " +
-            m.getRegionName());
+            Bytes.toString(m.getRegionName()));
         master.deleteEmptyMetaRows(server, m.getRegionName(), emptyRows);
       }
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java Thu Mar 19 09:03:08 2009
@@ -61,6 +61,7 @@
   /**
    * Since this object is a registered updater, this method will be called
    * periodically, e.g. every 5 seconds.
+   * @param unused 
    */
   public void doUpdates(MetricsContext unused) {
     synchronized (this) {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java Thu Mar 19 09:03:08 2009
@@ -53,7 +53,7 @@
     long now = System.currentTimeMillis();
     long diff = (now-ts)/1000;
     if (diff == 0) diff = 1; // sigh this is crap.
-    this.prevRate = value / diff;
+    this.prevRate = (float)value / diff;
     this.value = 0;
     this.ts = now;
   }

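prevRate is a float, but value and diff are integral types, so value / diff truncated to a whole number before the widening assignment; casting one operand first makes the division itself floating-point. A two-line demonstration:

    public class DivisionDemo {
      public static void main(String[] args) {
        long value = 7, diff = 2;
        float truncated = value / diff;      // long division, then widen: 3.0
        float exact = (float) value / diff;  // float division: 3.5
        System.out.println(truncated + " vs " + exact); // 3.0 vs 3.5
      }
    }
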
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Thu Mar 19 09:03:08 2009
@@ -122,7 +122,7 @@
 
   private volatile boolean closed = false;
 
-  private final Integer sequenceLock = new Integer(0);
+  private final Object sequenceLock = new Object();
   private volatile long logSeqNum = 0;
 
   private volatile long filenum = 0;
@@ -136,7 +136,7 @@
 
   // We synchronize on updateLock to prevent updates and to prevent a log roll
   // during an update
-  private final Integer updateLock = new Integer(0);
+  private final Object updateLock = new Object();
   
   /*
    * If more than this many logs, force flush of oldest region to oldest edit

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java Thu Mar 19 09:03:08 2009
@@ -61,6 +61,8 @@
 
   /**
    * @param value
+   * @param offset 
+   * @param length 
    * @return True if an entry and its content is {@link #DELETED_BYTES}.
    */
   public static boolean isDeleted(final byte [] value, final int offset,

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java Thu Mar 19 09:03:08 2009
@@ -23,6 +23,7 @@
 import org.apache.hadoop.io.*;
 
 import java.io.*;
+import java.util.Arrays;
 
 /**
  * A Key for an entry in the change log.
@@ -94,13 +95,19 @@
   
   @Override
   public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
     return compareTo((HLogKey)obj) == 0;
   }
   
   @Override
   public int hashCode() {
-    int result = this.regionName.hashCode();
-    result ^= this.row.hashCode(); 
+    int result = Arrays.hashCode(this.regionName);
+    result ^= Arrays.hashCode(this.row);
     result ^= this.logSeqNum;
     return result;
   }

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Thu Mar 19 09:03:08 2009
@@ -192,7 +192,7 @@
   // Stop updates lock
   private final ReentrantReadWriteLock updatesLock =
     new ReentrantReadWriteLock();
-  private final Integer splitLock = new Integer(0);
+  private final Object splitLock = new Object();
   private long minSequenceId;
   final AtomicInteger activeScannerCount = new AtomicInteger(0);
 
@@ -2474,7 +2474,8 @@
 
     LOG.info("starting merge of regions: " + a + " and " + b +
       " into new region " + newRegionInfo.toString() +
-        " with start key <" + startKey + "> and end key <" + endKey + ">");
+        " with start key <" + Bytes.toString(startKey) + "> and end key <" +
+        Bytes.toString(endKey) + ">");
 
     // Move HStoreFiles under new region directory
     
@@ -2665,7 +2666,7 @@
   private byte [] binaryIncrement(byte [] value, long amount) {
     for(int i=0;i<value.length;i++) {
       int cur = (int)(amount >> (8 * i)) % 256;
-      int val = (int)(value[value.length-i-1] & 0xff);
+      int val = value[value.length-i-1] & 0xff;
       int total = cur + val;
       if(total > 255) {
         amount += ((long)256 << (8 * i));

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Thu Mar 19 09:03:08 2009
@@ -136,7 +136,7 @@
   protected final HBaseConfiguration conf;
 
   private final ServerConnection connection;
-  private final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
+  protected final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
   private FileSystem fs;
   private Path rootDir;
   private final Random rand = new Random();
@@ -757,6 +757,8 @@
    * Thread for toggling safemode after some configurable interval.
    */
   private class CompactionLimitThread extends Thread {
+    protected CompactionLimitThread() {}
+
     @Override
     public void run() {
       // First wait until we exit safe mode
@@ -1233,8 +1235,8 @@
   * Data structure to hold an HMsg and a retry count.
    */
   private static class ToDoEntry {
-    private int tries;
-    private final HMsg msg;
+    protected int tries;
+    protected final HMsg msg;
     ToDoEntry(HMsg msg) {
       this.tries = 0;
       this.msg = msg;
@@ -1406,14 +1408,14 @@
     return r; 
   }
   
-  /*
+  /**
    * Add a MSG_REPORT_PROCESS_OPEN to the outbound queue.
   * This method is called while the region is in the queue of regions to
   * process, and again from the Worker thread running the region open while
   * the region is actually being opened.
    * @param hri Region to add the message for
    */
-  protected void addProcessingMessage(final HRegionInfo hri) {
+  public void addProcessingMessage(final HRegionInfo hri) {
     getOutboundMsgs().add(new HMsg(HMsg.Type.MSG_REPORT_PROCESS_OPEN, hri));
   }
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java Thu Mar 19 09:03:08 2009
@@ -84,7 +84,8 @@
   /**
    * Constructor.
    * @param ttl The TTL for cache entries, in milliseconds.
-   * @param regionInfo The HRI for this cache 
+   * @param c 
+   * @param rc
    */
   public Memcache(final long ttl, final Comparator<HStoreKey> c,
       final HStoreKey.StoreKeyComparator rc) {
@@ -454,7 +455,7 @@
             if (Store.notExpiredAndNotInDeletes(this.ttl, 
                 found_key, now, deletes)) {
               candidateKeys.put(stripTimestamp(found_key),
-                new Long(found_key.getTimestamp()));
+                Long.valueOf(found_key.getTimestamp()));
             } else {
               if (deletedOrExpiredRow == null) {
                 deletedOrExpiredRow = new HStoreKey(found_key);
@@ -523,7 +524,7 @@
           if (Store.notExpiredAndNotInDeletes(this.ttl, found_key, now, deletes)) {
             lastRowFound = found_key.getRow();
             candidateKeys.put(stripTimestamp(found_key), 
-              new Long(found_key.getTimestamp()));
+              Long.valueOf(found_key.getTimestamp()));
           } else {
             expires.add(found_key);
             if (LOG.isDebugEnabled()) {

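Long.valueOf may serve a cached instance (the JLS guarantees caching for -128..127) where new Long always allocates, and the two are interchangeable as map keys and for equals(). A sketch:

    public class BoxingDemo {
      public static void main(String[] args) {
        Long fresh = new Long(42L);          // always a new allocation
        Long cached = Long.valueOf(42L);     // may reuse a cached instance
        System.out.println(cached == Long.valueOf(42L)); // true: cache hit
        System.out.println(fresh == Long.valueOf(42L));  // false: fresh object
        System.out.println(fresh.equals(cached));        // true: same value
      }
    }

Timestamps here will usually exceed the cache range, so the win is mostly consistency with preferred boxing style rather than guaranteed reuse.
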
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/MemcacheFlusher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/MemcacheFlusher.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/MemcacheFlusher.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/MemcacheFlusher.java Thu Mar 19 09:03:08 2009
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -144,7 +145,7 @@
         continue;
       } catch (Exception ex) {
         LOG.error("Cache flush failed" +
-          (r != null ? (" for region " + r.getRegionName()) : ""),
+          (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""),
           ex);
         if (!server.checkFileSystem()) {
           break;
@@ -239,7 +240,7 @@
       return false;
     } catch (IOException ex) {
       LOG.error("Cache flush failed"
-          + (region != null ? (" for region " + region.getRegionName()) : ""),
+          + (region != null ? (" for region " + Bytes.toString(region.getRegionName())) : ""),
           RemoteExceptionHandler.checkIOException(ex));
       if (!server.checkFileSystem()) {
         return false;
@@ -269,7 +270,7 @@
   private synchronized void flushSomeRegions() {
     // keep flushing until we hit the low water mark
     long globalMemcacheSize = -1;
-    ArrayList<HRegion> regionsToCompact = new ArrayList();
+    ArrayList<HRegion> regionsToCompact = new ArrayList<HRegion>();
     for (SortedMap<Long, HRegion> m =
         this.server.getCopyOfOnlineRegionsSortedBySize();
       (globalMemcacheSize = server.getGlobalMemcacheSize()) >=

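Typing the list as ArrayList<HRegion> instead of the raw ArrayList moves element checking to compile time; a raw list accepts anything and only fails when the bad element is read back out. A small illustration with a stand-in element type:

    import java.util.ArrayList;

    public class RawTypeDemo {
      public static void main(String[] args) {
        ArrayList raw = new ArrayList();     // raw type: unchecked warning
        raw.add("not a number");             // compiles without complaint
        // Integer n = (Integer) raw.get(0); // would throw ClassCastException

        ArrayList<Integer> typed = new ArrayList<Integer>();
        typed.add(42);
        // typed.add("not a number");        // rejected at compile time
        System.out.println(typed.get(0));
      }
    }
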
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java Thu Mar 19 09:03:08 2009
@@ -102,7 +102,7 @@
   private int maxFilesToCompact;
   private final long desiredMaxFileSize;
   private volatile long storeSize = 0L;
-  private final Integer flushLock = new Integer(0);
+  private final Object flushLock = new Object();
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   final byte [] storeName;
   private final String storeNameStr;
@@ -126,7 +126,7 @@
   private volatile long maxSeqId = -1;
 
   private final Path compactionDir;
-  private final Integer compactLock = new Integer(0);
+  private final Object compactLock = new Object();
   private final int compactionThreshold;
   private final int blocksize;
   private final boolean bloomfilter;
@@ -255,7 +255,7 @@
       // but this is probably not what we want long term.  If we got here there
       // has been data-loss
       LOG.warn("Exception processing reconstruction log " + reconstructionLog +
-        " opening " + this.storeName +
+        " opening " + Bytes.toString(this.storeName) +
         " -- continuing.  Probably lack-of-HADOOP-1700 causing DATA LOSS!", e);
     } catch (IOException e) {
       // Presume we got here because of some HDFS issue. Don't just keep going.
@@ -263,7 +263,7 @@
       // again until human intervention but alternative has us skipping logs
       // and losing edits: HBASE-642.
       LOG.warn("Exception processing reconstruction log " + reconstructionLog +
-        " opening " + this.storeName, e);
+        " opening " + Bytes.toString(this.storeName), e);
       throw e;
     }
   }
@@ -1799,7 +1799,7 @@
   /*
    * Datastructure that holds size and row to split a file around.
    */
-  class StoreSize {
+  static class StoreSize {
     private final long size;
     private final byte[] key;
     StoreSize(long size, byte[] key) {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Thu Mar 19 09:03:08 2009
@@ -158,7 +158,7 @@
    * @return Calculated path to parent region file.
    * @throws IOException
    */
-  static Path getReferredToFile(final Path p) throws IOException {
+  static Path getReferredToFile(final Path p) {
     Matcher m = REF_NAME_PARSER.matcher(p.getName());
     if (m == null || !m.matches()) {
       LOG.warn("Failed match of store file name " + p.toString());
@@ -252,6 +252,7 @@
       super(fs, path, cache);
     }
 
+    @Override
     protected String toStringFirstKey() {
       String result = "";
       try {
@@ -262,6 +263,7 @@
       return result;
     }
 
+    @Override
     protected String toStringLastKey() {
       String result = "";
       try {
@@ -287,6 +289,7 @@
       return super.toString() + (isTop()? ", half=top": ", half=bottom");
     }
 
+    @Override
     protected String toStringFirstKey() {
       String result = "";
       try {
@@ -297,6 +300,7 @@
       return result;
     }
 
+    @Override
     protected String toStringLastKey() {
       String result = "";
       try {
@@ -320,7 +324,6 @@
 
   /**
    * @throws IOException
-   * @see #open()
    */
   public synchronized void close() throws IOException {
     if (this.reader != null) {
@@ -329,6 +332,7 @@
     }
   }
 
+  @Override
   public String toString() {
     return this.path.toString() +
       (isReference()? "-" + this.referencePath + "-" + reference.toString(): "");
@@ -388,6 +392,7 @@
    * @param blocksize
    * @param algorithm Pass null to get default.
    * @param c Pass null to get default.
+   * @param bloomfilter 
    * @return HFile.Writer
    * @throws IOException
    */

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java Thu Mar 19 09:03:08 2009
@@ -191,7 +191,7 @@
   }
 
  // Data structure to hold the next viable row (and timestamp).
-  class ViableRow {
+  static class ViableRow {
     private final byte [] row;
     private final long ts;
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java Thu Mar 19 09:03:08 2009
@@ -283,7 +283,7 @@
       try {
         scanners[i].close();
       } catch (IOException e) {
-        LOG.warn(store.storeName + " failed closing scanner " + i, e);
+        LOG.warn(Bytes.toString(store.storeName) + " failed closing scanner " + i, e);
       }
     } finally {
       scanners[i] = null;

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java?rev=755878&r1=755877&r2=755878&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java Thu Mar 19 09:03:08 2009
@@ -30,8 +30,6 @@
 import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
-
 
 /** 
  * This class is for maintaining the various regionserver statistics
@@ -96,6 +94,7 @@
   /**
    * Since this object is a registered updater, this method will be called
    * periodically, e.g. every 5 seconds.
+   * @param unused 
    */
   public void doUpdates(MetricsContext unused) {
     synchronized (this) {