Posted to commits@hbase.apache.org by st...@apache.org on 2010/05/07 21:17:55 UTC

svn commit: r942184 [6/15] - in /hadoop/hbase/branches/0.20: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/io...

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java Fri May  7 19:17:48 2010
@@ -61,28 +61,28 @@ import org.apache.hadoop.util.StringUtil
 /** An abstract IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
  * a port and is defined by a parameter class and a value class.
- * 
- * 
+ *
+ *
  * <p>Copied local so can fix HBASE-900.
- * 
+ *
  * @see HBaseClient
  */
 public abstract class HBaseServer {
-  
+
   /**
    * The first four bytes of Hadoop RPC connections
    */
   public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
-  
+
   // 1 : Introduce ping and server does not throw away RPCs
-  // 3 : RPC was refactored in 0.19 
+  // 3 : RPC was refactored in 0.19
   public static final byte CURRENT_VERSION = 3;
-  
+
   /**
    * How many calls/handler are allowed in the queue.
    */
   private static final int MAX_QUEUE_SIZE_PER_HANDLER = 100;
-  
+
   public static final Log LOG =
     LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer");
 
@@ -98,13 +98,13 @@ public abstract class HBaseServer {
   public static HBaseServer get() {
     return SERVER.get();
   }
- 
+
   /** This is set to Call object before Handler invokes an RPC and reset
    * after the call returns.
    */
   protected static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>();
-  
-  /** Returns the remote side ip address when invoked inside an RPC 
+
+  /** Returns the remote side ip address when invoked inside an RPC
    *  Returns null incase of an error.
    *  @return InetAddress
    */
@@ -124,23 +124,23 @@ public abstract class HBaseServer {
     return (addr == null) ? null : addr.getHostAddress();
   }
 
-  protected String bindAddress; 
+  protected String bindAddress;
   protected int port;                             // port we listen on
   private int handlerCount;                       // number of handler threads
   protected Class<? extends Writable> paramClass; // class of call parameters
-  protected int maxIdleTime;                      // the maximum idle time after 
+  protected int maxIdleTime;                      // the maximum idle time after
                                                   // which a client may be
                                                   // disconnected
   protected int thresholdIdleConnections;         // the number of idle
-                                                  // connections after which we 
-                                                  // will start cleaning up idle 
+                                                  // connections after which we
+                                                  // will start cleaning up idle
                                                   // connections
-  int maxConnectionsToNuke;                       // the max number of 
+  int maxConnectionsToNuke;                       // the max number of
                                                   // connections to nuke
                                                   // during a cleanup
-  
+
   protected HBaseRpcMetrics  rpcMetrics;
-  
+
   protected Configuration conf;
 
   private int maxQueueSize;
@@ -151,7 +151,7 @@ public abstract class HBaseServer {
   volatile protected boolean running = true;         // true while server runs
   protected BlockingQueue<Call> callQueue; // queued calls
 
-  protected List<Connection> connectionList = 
+  protected List<Connection> connectionList =
     Collections.synchronizedList(new LinkedList<Connection>());
   //maintain a list
   //of client connections
@@ -162,7 +162,7 @@ public abstract class HBaseServer {
   protected HBaseRPCErrorHandler errorHandler = null;
 
   /**
-   * A convenience method to bind to a given address and report 
+   * A convenience method to bind to a given address and report
    * better exceptions if the address is not a valid host.
    * @param socket the socket to bind
    * @param address the address to bind to
@@ -171,13 +171,13 @@ public abstract class HBaseServer {
    * @throws UnknownHostException if the address isn't a valid host name
    * @throws IOException other random errors from bind
    */
-  public static void bind(ServerSocket socket, InetSocketAddress address, 
+  public static void bind(ServerSocket socket, InetSocketAddress address,
                           int backlog) throws IOException {
     try {
       socket.bind(address, backlog);
     } catch (BindException e) {
       BindException bindException =
-        new BindException("Problem binding to " + address + " : " + 
+        new BindException("Problem binding to " + address + " : " +
             e.getMessage());
       bindException.initCause(e);
       throw bindException;
@@ -185,7 +185,7 @@ public abstract class HBaseServer {
       // If they try to bind to a different host's address, give a better
       // error message.
       if ("Unresolved address".equals(e.getMessage())) {
-        throw new UnknownHostException("Invalid hostname for server: " + 
+        throw new UnknownHostException("Invalid hostname for server: " +
                                        address.getHostName());
       }
       throw e;
@@ -208,7 +208,7 @@ public abstract class HBaseServer {
       this.timestamp = System.currentTimeMillis();
       this.response = null;
     }
-    
+
     @Override
     public String toString() {
       return param.toString() + " from " + connection.toString();
@@ -221,17 +221,17 @@ public abstract class HBaseServer {
 
   /** Listens on the socket. Creates jobs for the handler threads*/
   private class Listener extends Thread {
-    
+
     private ServerSocketChannel acceptChannel = null; //the accept channel
     private Selector selector = null; //the selector that we use for the server
     private InetSocketAddress address; //the address we bind at
     private Random rand = new Random();
     private long lastCleanupRunTime = 0; //the last time when a cleanup connec-
                                          //-tion (for idle connections) ran
-    private long cleanupInterval = 10000; //the minimum interval between 
+    private long cleanupInterval = 10000; //the minimum interval between
                                           //two cleanup runs
     private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128);
-    
+
     public Listener() throws IOException {
       address = new InetSocketAddress(bindAddress, port);
       // Create a new server socket and set to non blocking mode
@@ -252,7 +252,7 @@ public abstract class HBaseServer {
     /** cleanup connections from connectionList. Choose a random range
      * to scan and also have a limit on the number of the connections
      * that will be cleanedup per run. The criteria for cleanup is the time
-     * for which the connection was idle. If 'force' is true then all 
+     * for which the connection was idle. If 'force' is true then all
      * connections will be looked at for the cleanup.
      */
     private void cleanupConnections(boolean force) {
@@ -331,7 +331,7 @@ public abstract class HBaseServer {
             }
           } else {
             // we can run out of memory if we have too many threads
-            // log the event and sleep for a minute and give 
+            // log the event and sleep for a minute and give
             // some thread(s) a chance to finish
             LOG.warn("Out of Memory in server select", e);
             closeCurrentConnection(key);
@@ -358,7 +358,7 @@ public abstract class HBaseServer {
 
         selector= null;
         acceptChannel= null;
-        
+
         // clean up all connections
         while (!connectionList.isEmpty()) {
           closeConnection(connectionList.remove(0));
@@ -381,7 +381,7 @@ public abstract class HBaseServer {
     InetSocketAddress getAddress() {
       return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
     }
-    
+
     void doAccept(SelectionKey key) throws IOException, OutOfMemoryError {
       Connection c = null;
       ServerSocketChannel server = (ServerSocketChannel) key.channel();
@@ -411,10 +411,10 @@ public abstract class HBaseServer {
       int count = 0;
       Connection c = (Connection)key.attachment();
       if (c == null) {
-        return;  
+        return;
       }
       c.setLastContact(System.currentTimeMillis());
-      
+
       try {
         count = c.readAndProcess();
       } catch (InterruptedException ieo) {
@@ -425,7 +425,7 @@ public abstract class HBaseServer {
       }
       if (count < 0) {
         if (LOG.isDebugEnabled())
-          LOG.debug(getName() + ": disconnecting client " + 
+          LOG.debug(getName() + ": disconnecting client " +
                     c.getHostAddress() + ". Number of active connections: "+
                     numConnections);
         closeConnection(c);
@@ -434,7 +434,7 @@ public abstract class HBaseServer {
       else {
         c.setLastContact(System.currentTimeMillis());
       }
-    }   
+    }
 
     synchronized void doStop() {
       if (selector != null) {
@@ -455,7 +455,7 @@ public abstract class HBaseServer {
   private class Responder extends Thread {
     private Selector writeSelector;
     private int pending;         // connections waiting to register
-    
+
     final static int PURGE_INTERVAL = 900000; // 15mins
 
     Responder() throws IOException {
@@ -498,7 +498,7 @@ public abstract class HBaseServer {
           //
           LOG.debug("Checking for old call responses.");
           ArrayList<Call> calls;
-          
+
           // get the list of channels from list of keys.
           synchronized (writeSelector.keys()) {
             calls = new ArrayList<Call>(writeSelector.keys().size());
@@ -506,12 +506,12 @@ public abstract class HBaseServer {
             while (iter.hasNext()) {
               SelectionKey key = iter.next();
               Call call = (Call)key.attachment();
-              if (call != null && key.channel() == call.connection.channel) { 
+              if (call != null && key.channel() == call.connection.channel) {
                 calls.add(call);
               }
             }
           }
-          
+
           for(Call call : calls) {
             doPurge(call, now);
           }
@@ -531,7 +531,7 @@ public abstract class HBaseServer {
             try { Thread.sleep(60000); } catch (Exception ie) {}
       }
         } catch (Exception e) {
-          LOG.warn("Exception in Responder " + 
+          LOG.warn("Exception in Responder " +
                    StringUtils.stringifyException(e));
         }
       }
@@ -564,7 +564,7 @@ public abstract class HBaseServer {
     }
 
     //
-    // Remove calls that have been pending in the responseQueue 
+    // Remove calls that have been pending in the responseQueue
     // for a long time.
     //
     private void doPurge(Call call, long now) {
@@ -629,18 +629,18 @@ public abstract class HBaseServer {
             }
           } else {
             //
-            // If we were unable to write the entire response out, then 
-            // insert in Selector queue. 
+            // If we were unable to write the entire response out, then
+            // insert in Selector queue.
             //
             call.connection.responseQueue.addFirst(call);
-            
+
             if (inHandler) {
               // set the serve time when the response has to be sent later
               call.timestamp = System.currentTimeMillis();
-              
+
               incPending();
               try {
-                // Wakeup the thread blocked on select, only then can the call 
+                // Wakeup the thread blocked on select, only then can the call
                 // to channel.register() complete.
                 writeSelector.wakeup();
                 channel.register(writeSelector, SelectionKey.OP_WRITE, call);
@@ -653,7 +653,7 @@ public abstract class HBaseServer {
             }
             if (LOG.isDebugEnabled()) {
               LOG.debug(getName() + ": responding to #" + call.id + " from " +
-                        call.connection + " Wrote partial " + numBytes + 
+                        call.connection + " Wrote partial " + numBytes +
                         " bytes.");
             }
           }
@@ -711,7 +711,7 @@ public abstract class HBaseServer {
     private long lastContact;
     private int dataLength;
     protected Socket socket;
-    // Cache the remote host & port info so that even if the socket is 
+    // Cache the remote host & port info so that even if the socket is
     // disconnected, we can say where it used to connect to.
     private String hostAddress;
     private int remotePort;
@@ -739,13 +739,13 @@ public abstract class HBaseServer {
                    socketSendBufferSize);
         }
       }
-    }   
+    }
 
     @Override
     public String toString() {
-      return getHostAddress() + ":" + remotePort; 
+      return getHostAddress() + ":" + remotePort;
     }
-    
+
     public String getHostAddress() {
       return hostAddress;
     }
@@ -762,17 +762,17 @@ public abstract class HBaseServer {
     private boolean isIdle() {
       return rpcCount == 0;
     }
-    
+
     /* Decrement the outstanding RPC count */
     protected void decRpcCount() {
       rpcCount--;
     }
-    
+
     /* Increment the outstanding RPC count */
     private void incRpcCount() {
       rpcCount++;
     }
-    
+
     protected boolean timedOut(long currentTime) {
       if (isIdle() && currentTime -  lastContact > maxIdleTime)
         return true;
@@ -783,14 +783,14 @@ public abstract class HBaseServer {
       while (true) {
         /* Read at most one RPC. If the header is not read completely yet
          * then iterate until we read first RPC or until there is no data left.
-         */    
+         */
         int count = -1;
         if (dataLengthBuffer.remaining() > 0) {
-          count = channelRead(channel, dataLengthBuffer);       
-          if (count < 0 || dataLengthBuffer.remaining() > 0) 
+          count = channelRead(channel, dataLengthBuffer);
+          if (count < 0 || dataLengthBuffer.remaining() > 0)
             return count;
         }
-      
+
         if (!versionRead) {
           //Every connection is expected to send the header.
           ByteBuffer versionBuffer = ByteBuffer.allocate(1);
@@ -799,13 +799,13 @@ public abstract class HBaseServer {
             return count;
           }
           int version = versionBuffer.get(0);
-          
-          dataLengthBuffer.flip();          
+
+          dataLengthBuffer.flip();
           if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) {
             //Warning is ok since this is not supposed to happen.
-            LOG.warn("Incorrect header or version mismatch from " + 
+            LOG.warn("Incorrect header or version mismatch from " +
                      hostAddress + ":" + remotePort +
-                     " got version " + version + 
+                     " got version " + version +
                      " expected version " + CURRENT_VERSION);
             return -1;
           }
@@ -813,11 +813,11 @@ public abstract class HBaseServer {
           versionRead = true;
           continue;
         }
-        
+
         if (data == null) {
           dataLengthBuffer.flip();
           dataLength = dataLengthBuffer.getInt();
-       
+
           if (dataLength == HBaseClient.PING_CALL_ID) {
             dataLengthBuffer.clear();
             return 0;  //ping message
@@ -825,9 +825,9 @@ public abstract class HBaseServer {
           data = ByteBuffer.allocate(dataLength);
           incRpcCount();  // Increment the rpc count
         }
-        
+
         count = channelRead(channel, data);
-        
+
         if (data.remaining() == 0) {
           dataLengthBuffer.clear();
           data.flip();
@@ -840,7 +840,7 @@ public abstract class HBaseServer {
           headerRead = true;
           data = null;
           continue;
-        } 
+        }
         return count;
       }
     }
@@ -854,18 +854,18 @@ public abstract class HBaseServer {
         new DataInputStream(new ByteArrayInputStream(data.array()));
       ticket = (UserGroupInformation) ObjectWritable.readObject(in, conf);
     }
-    
+
     private void processData() throws  IOException, InterruptedException {
       DataInputStream dis =
         new DataInputStream(new ByteArrayInputStream(data.array()));
       int id = dis.readInt();                    // try to read an id
-        
+
       if (LOG.isDebugEnabled())
         LOG.debug(" got #" + id);
-            
+
       Writable param = ReflectionUtils.newInstance(paramClass, conf);           // read param
-      param.readFields(dis);        
-        
+      param.readFields(dis);
+
       Call call = new Call(id, param, this);
       callQueue.put(call);              // queue the call; maybe blocked here
     }
@@ -903,11 +903,11 @@ public abstract class HBaseServer {
           if (LOG.isDebugEnabled())
             LOG.debug(getName() + ": has #" + call.id + " from " +
                       call.connection);
-          
+
           String errorClass = null;
           String error = null;
           Writable value = null;
-          
+
           CurCall.set(call);
           UserGroupInformation previous = UserGroupInformation.getCurrentUGI();
           UserGroupInformation.setCurrentUGI(call.connection.ticket);
@@ -965,22 +965,22 @@ public abstract class HBaseServer {
     }
 
   }
-  
+
   protected HBaseServer(String bindAddress, int port,
-                  Class<? extends Writable> paramClass, int handlerCount, 
+                  Class<? extends Writable> paramClass, int handlerCount,
                   Configuration conf)
-    throws IOException 
+    throws IOException
   {
     this(bindAddress, port, paramClass, handlerCount,  conf, Integer.toString(port));
   }
   /** Constructs a server listening on the named port and address.  Parameters passed must
    * be of the named class.  The <code>handlerCount</handlerCount> determines
    * the number of handler threads that will be used to process calls.
-   * 
+   *
    */
-  protected HBaseServer(String bindAddress, int port, 
-                  Class<? extends Writable> paramClass, int handlerCount, 
-                  Configuration conf, String serverName) 
+  protected HBaseServer(String bindAddress, int port,
+                  Class<? extends Writable> paramClass, int handlerCount,
+                  Configuration conf, String serverName)
     throws IOException {
     this.bindAddress = bindAddress;
     this.conf = conf;
@@ -989,14 +989,14 @@ public abstract class HBaseServer {
     this.handlerCount = handlerCount;
     this.socketSendBufferSize = 0;
     this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER;
-    this.callQueue  = new LinkedBlockingQueue<Call>(maxQueueSize); 
+    this.callQueue  = new LinkedBlockingQueue<Call>(maxQueueSize);
     this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000);
     this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
     this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000);
-    
+
     // Start the listener here and let it bind to the port
     listener = new Listener();
-    this.port = listener.getAddress().getPort();    
+    this.port = listener.getAddress().getPort();
     this.rpcMetrics = new HBaseRpcMetrics(serverName,
                           Integer.toString(this.port));
     this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);
@@ -1013,7 +1013,7 @@ public abstract class HBaseServer {
     }
     connection.close();
   }
-  
+
   /** Sets the socket buffer size used for responding to RPCs.
    * @param size
    */
@@ -1024,7 +1024,7 @@ public abstract class HBaseServer {
     responder.start();
     listener.start();
     handlers = new Handler[handlerCount];
-    
+
     for (int i = 0; i < handlerCount; i++) {
       handlers[i] = new Handler(i);
       handlers[i].start();
@@ -1069,11 +1069,11 @@ public abstract class HBaseServer {
   public synchronized InetSocketAddress getListenerAddress() {
     return listener.getAddress();
   }
-  
-  /** Called for each call. 
-   * @param param 
-   * @param receiveTime 
-   * @return Writable 
+
+  /** Called for each call.
+   * @param param
+   * @param receiveTime
+   * @return Writable
    * @throws IOException
    */
   public abstract Writable call(Writable param, long receiveTime)
@@ -1086,7 +1086,7 @@ public abstract class HBaseServer {
   public int getNumOpenConnections() {
     return numConnections;
   }
-  
+
   /**
    * The number of rpc calls in the queue.
    * @return The number of rpc calls in the queue.
@@ -1101,26 +1101,26 @@ public abstract class HBaseServer {
    */
   public void setErrorHandler(HBaseRPCErrorHandler handler) {
     this.errorHandler = handler;
-  }      
+  }
 
   /**
-   * When the read or write buffer size is larger than this limit, i/o will be 
+   * When the read or write buffer size is larger than this limit, i/o will be
    * done in chunks of this size. Most RPC requests and responses would be
    * be smaller.
    */
   private static int NIO_BUFFER_LIMIT = 8*1024; //should not be more than 64KB.
-  
+
   /**
    * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}.
-   * If the amount of data is large, it writes to channel in smaller chunks. 
-   * This is to avoid jdk from creating many direct buffers as the size of 
+   * If the amount of data is large, it writes to channel in smaller chunks.
+   * This is to avoid jdk from creating many direct buffers as the size of
    * buffer increases. This also minimizes extra copies in NIO layer
-   * as a result of multiple write operations required to write a large 
-   * buffer.  
+   * as a result of multiple write operations required to write a large
+   * buffer.
    *
    * @see WritableByteChannel#write(ByteBuffer)
    */
-  protected static int channelWrite(WritableByteChannel channel, 
+  protected static int channelWrite(WritableByteChannel channel,
                                     ByteBuffer buffer) throws IOException {
     return (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
            channel.write(buffer) : channelIO(null, channel, buffer);
@@ -1128,13 +1128,13 @@ public abstract class HBaseServer {
 
   /**
    * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}.
-   * If the amount of data is large, it writes to channel in smaller chunks. 
-   * This is to avoid jdk from creating many direct buffers as the size of 
+   * If the amount of data is large, it writes to channel in smaller chunks.
+   * This is to avoid jdk from creating many direct buffers as the size of
    * ByteBuffer increases. There should not be any performance degredation.
-   * 
+   *
    * @see ReadableByteChannel#read(ByteBuffer)
    */
-  protected static int channelRead(ReadableByteChannel channel, 
+  protected static int channelRead(ReadableByteChannel channel,
                                    ByteBuffer buffer) throws IOException {
     return (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
            channel.read(buffer) : channelIO(channel, null, buffer);
@@ -1144,35 +1144,35 @@ public abstract class HBaseServer {
    * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)}
    * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only
    * one of readCh or writeCh should be non-null.
-   * 
+   *
    * @see #channelRead(ReadableByteChannel, ByteBuffer)
    * @see #channelWrite(WritableByteChannel, ByteBuffer)
    */
-  private static int channelIO(ReadableByteChannel readCh, 
+  private static int channelIO(ReadableByteChannel readCh,
                                WritableByteChannel writeCh,
                                ByteBuffer buf) throws IOException {
-    
+
     int originalLimit = buf.limit();
     int initialRemaining = buf.remaining();
     int ret = 0;
-    
+
     while (buf.remaining() > 0) {
       try {
         int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
         buf.limit(buf.position() + ioSize);
-        
-        ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); 
-        
+
+        ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf);
+
         if (ret < ioSize) {
           break;
         }
 
       } finally {
-        buf.limit(originalLimit);        
+        buf.limit(originalLimit);
       }
     }
 
-    int nBytes = initialRemaining - buf.remaining(); 
+    int nBytes = initialRemaining - buf.remaining();
     return (nBytes > 0) ? nBytes : ret;
-  }      
+  }
 }
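
For context: channelWrite(), channelRead() and channelIO() above cap each read or write at NIO_BUFFER_LIMIT so the JDK does not allocate a temporary direct buffer as large as the whole request. A minimal standalone sketch of the same chunking idea follows; the class name ChunkedChannel and the constant CHUNK_LIMIT are invented for illustration and are not part of HBaseServer. The read side is symmetric: substitute channel.read(buf) for channel.write(buf).

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

public class ChunkedChannel {
  // Same role as NIO_BUFFER_LIMIT above: never hand the channel more than
  // this many bytes per call, so the NIO layer's temporary direct buffer
  // stays small.
  private static final int CHUNK_LIMIT = 8 * 1024;

  /** Writes buf to the channel in chunks of at most CHUNK_LIMIT bytes. */
  public static int chunkedWrite(WritableByteChannel channel, ByteBuffer buf)
      throws IOException {
    int originalLimit = buf.limit();
    int initialRemaining = buf.remaining();
    int lastWrite = 0;
    while (buf.remaining() > 0) {
      try {
        // Expose only the next chunk to the channel.
        int ioSize = Math.min(buf.remaining(), CHUNK_LIMIT);
        buf.limit(buf.position() + ioSize);
        lastWrite = channel.write(buf);
        if (lastWrite < ioSize) {
          break;                      // partial write: stop and report progress
        }
      } finally {
        buf.limit(originalLimit);     // always restore the caller's limit
      }
    }
    int written = initialRemaining - buf.remaining();
    return (written > 0) ? written : lastWrite;
  }
}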

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java Fri May  7 19:17:48 2010
@@ -31,27 +31,27 @@ import org.apache.hadoop.io.Writable;
  * Clients interact with the HMasterInterface to gain access to meta-level
  * HBase functionality, like finding an HRegionServer and creating/destroying
  * tables.
- * 
+ *
  * <p>NOTE: if you change the interface, you must change the RPC version
  * number in HBaseRPCProtocolVersion
- * 
+ *
  */
 public interface HMasterInterface extends HBaseRPCProtocolVersion {
 
   /** @return true if master is available */
   public boolean isMasterRunning();
-  
+
   // Admin tools would use these cmds
-  
+
   /**
    * Creates a new table.  If splitKeys are specified, then the table will be
    * created with an initial set of multiple regions.  If splitKeys is null,
    * the table will be created with a single region.
    * @param desc table descriptor
-   * @param splitKeys 
+   * @param splitKeys
    * @throws IOException
    */
-  public void createTable(HTableDescriptor desc, byte [][] splitKeys) 
+  public void createTable(HTableDescriptor desc, byte [][] splitKeys)
   throws IOException;
 
   /**
@@ -60,7 +60,7 @@ public interface HMasterInterface extend
    * @throws IOException
    */
   public void deleteTable(final byte [] tableName) throws IOException;
-  
+
   /**
    * Adds a column to the specified table
    * @param tableName
@@ -77,8 +77,8 @@ public interface HMasterInterface extend
    * @param descriptor new column descriptor
    * @throws IOException
    */
-  public void modifyColumn(final byte [] tableName, final byte [] columnName, 
-    HColumnDescriptor descriptor) 
+  public void modifyColumn(final byte [] tableName, final byte [] columnName,
+    HColumnDescriptor descriptor)
   throws IOException;
 
 
@@ -90,17 +90,17 @@ public interface HMasterInterface extend
    */
   public void deleteColumn(final byte [] tableName, final byte [] columnName)
   throws IOException;
-  
+
   /**
    * Puts the table on-line (only needed if table has been previously taken offline)
    * @param tableName
    * @throws IOException
    */
   public void enableTable(final byte [] tableName) throws IOException;
-  
+
   /**
    * Take table offline
-   * 
+   *
    * @param tableName
    * @throws IOException
    */
@@ -108,7 +108,7 @@ public interface HMasterInterface extend
 
   /**
    * Modify a table's metadata
-   * 
+   *
    * @param tableName
    * @param op
    * @param args
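
For context: client code normally reaches these master operations through the HBaseAdmin wrapper rather than calling HMasterInterface directly. The following is a minimal sketch, assuming the 0.20 HBaseAdmin signatures for createTable, disableTable, addColumn and enableTable; the table name "test_table" and the column family names are invented for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class AdminSketch {
  public static void main(String[] args) throws Exception {
    HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());

    // Create a table with one column family; this ends up as an
    // HMasterInterface.createTable() RPC on the master.
    HTableDescriptor desc = new HTableDescriptor("test_table");
    desc.addFamily(new HColumnDescriptor("info"));
    admin.createTable(desc);

    // Schema changes go through disable / alter / enable.
    admin.disableTable("test_table");
    admin.addColumn("test_table", new HColumnDescriptor("extra"));
    admin.enableTable("test_table");
  }
}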

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java Fri May  7 19:17:48 2010
@@ -27,13 +27,13 @@ import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
 
 /**
- * HRegionServers interact with the HMasterRegionInterface to report on local 
+ * HRegionServers interact with the HMasterRegionInterface to report on local
  * goings-on and to obtain data-handling instructions from the HMaster.
  * <p>Changes here need to be reflected in HbaseObjectWritable HbaseRPC#Invoker.
- * 
+ *
  * <p>NOTE: if you change the interface, you must change the RPC version
  * number in HBaseRPCProtocolVersion
- * 
+ *
  */
 public interface HMasterRegionInterface extends HBaseRPCProtocolVersion {
 
@@ -49,16 +49,16 @@ public interface HMasterRegionInterface 
   /**
    * Called to renew lease, tell master what the region server is doing and to
    * receive new instructions from the master
-   * 
+   *
    * @param info server's address and start code
    * @param msgs things the region server wants to tell the master
-   * @param mostLoadedRegions Array of HRegionInfos that should contain the 
+   * @param mostLoadedRegions Array of HRegionInfos that should contain the
    * reporting server's most loaded regions. These are candidates for being
    * rebalanced.
    * @return instructions from the master to the region server
    * @throws IOException
    */
-  public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[], 
+  public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[],
     HRegionInfo mostLoadedRegions[])
   throws IOException;
 }
\ No newline at end of file

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java Fri May  7 19:17:48 2010
@@ -35,27 +35,27 @@ import org.apache.hadoop.hbase.regionser
 
 /**
  * Clients interact with HRegionServers using a handle to the HRegionInterface.
- * 
+ *
  * <p>NOTE: if you change the interface, you must change the RPC version
  * number in HBaseRPCProtocolVersion
- * 
+ *
  */
 public interface HRegionInterface extends HBaseRPCProtocolVersion {
-  /** 
+  /**
    * Get metainfo about an HRegion
-   * 
+   *
    * @param regionName name of the region
    * @return HRegionInfo object for region
    * @throws NotServingRegionException
    */
   public HRegionInfo getRegionInfo(final byte [] regionName)
   throws NotServingRegionException;
-  
+
 
   /**
-   * Return all the data for the row that matches <i>row</i> exactly, 
+   * Return all the data for the row that matches <i>row</i> exactly,
    * or the one that immediately preceeds it.
-   * 
+   *
    * @param regionName region name
    * @param row row key
    * @param family Column family to look for row in.
@@ -67,11 +67,11 @@ public interface HRegionInterface extend
   throws IOException;
 
   /**
-   * 
+   *
    * @return the regions served by this regionserver
    */
   public HRegion [] getOnlineRegionsAsArray();
-  
+
   /**
    * Perform Get operation.
    * @param regionName name of region to get from
@@ -91,17 +91,17 @@ public interface HRegionInterface extend
   public boolean exists(byte [] regionName, Get get) throws IOException;
 
   /**
-   * Put data into the specified region 
+   * Put data into the specified region
    * @param regionName
    * @param put the data to be put
    * @throws IOException
    */
   public void put(final byte [] regionName, final Put put)
   throws IOException;
-  
+
   /**
    * Put an array of puts into the specified region
-   * 
+   *
    * @param regionName
    * @param puts
    * @return The number of processed put's.  Returns -1 if all Puts
@@ -112,7 +112,7 @@ public interface HRegionInterface extend
   throws IOException;
 
   /**
-   * Deletes all the KeyValues that match those found in the Delete object, 
+   * Deletes all the KeyValues that match those found in the Delete object,
    * if their ts <= to the Delete. In case of a delete with a specific ts it
    * only deletes that specific KeyValue.
    * @param regionName
@@ -124,7 +124,7 @@ public interface HRegionInterface extend
 
   /**
    * Put an array of deletes into the specified region
-   * 
+   *
    * @param regionName
    * @param deletes
    * @return The number of processed deletes.  Returns -1 if all Deletes
@@ -137,7 +137,7 @@ public interface HRegionInterface extend
   /**
    * Atomically checks if a row/family/qualifier value match the expectedValue.
    * If it does, it adds the put.
-   * 
+   *
    * @param regionName
    * @param row
    * @param family
@@ -147,15 +147,15 @@ public interface HRegionInterface extend
    * @throws IOException
    * @return true if the new put was execute, false otherwise
    */
-  public boolean checkAndPut(final byte[] regionName, final byte [] row, 
+  public boolean checkAndPut(final byte[] regionName, final byte [] row,
       final byte [] family, final byte [] qualifier, final byte [] value,
       final Put put)
   throws IOException;
-  
+
   /**
    * Atomically increments a column value. If the column value isn't long-like,
    * this could throw an exception.
-   * 
+   *
    * @param regionName
    * @param row
    * @param family
@@ -165,18 +165,18 @@ public interface HRegionInterface extend
    * @return new incremented column value
    * @throws IOException
    */
-  public long incrementColumnValue(byte [] regionName, byte [] row, 
+  public long incrementColumnValue(byte [] regionName, byte [] row,
       byte [] family, byte [] qualifier, long amount, boolean writeToWAL)
   throws IOException;
-  
-  
+
+
   //
   // remote scanner interface
   //
 
   /**
    * Opens a remote scanner with a RowFilter.
-   * 
+   *
    * @param regionName name of region to scan
    * @param scan configured scan object
    * @return scannerId scanner identifier used in other calls
@@ -184,7 +184,7 @@ public interface HRegionInterface extend
    */
   public long openScanner(final byte [] regionName, final Scan scan)
   throws IOException;
-  
+
   /**
    * Get the next set of values
    * @param scannerId clientId passed to openScanner
@@ -192,7 +192,7 @@ public interface HRegionInterface extend
    * @throws IOException
    */
   public Result next(long scannerId) throws IOException;
-  
+
   /**
    * Get the next set of values
    * @param scannerId clientId passed to openScanner
@@ -203,10 +203,10 @@ public interface HRegionInterface extend
    * @throws IOException
    */
   public Result [] next(long scannerId, int numberOfRows) throws IOException;
-  
+
   /**
    * Close a scanner
-   * 
+   *
    * @param scannerId the scanner id returned by openScanner
    * @throws IOException
    */
@@ -232,15 +232,15 @@ public interface HRegionInterface extend
    */
   public void unlockRow(final byte [] regionName, final long lockId)
   throws IOException;
-  
-  
+
+
   /**
    * Method used when a master is taking the place of another failed one.
    * @return All regions assigned on this region server
    * @throws IOException
    */
   public HRegionInfo[] getRegionsAssignment() throws IOException;
-  
+
   /**
    * Method used when a master is taking the place of another failed one.
    * @return The HSI
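
For context: applications drive these region server RPCs through the HTable client rather than through HRegionInterface itself; put(), get(), incrementColumnValue() and getScanner() map more or less one-to-one onto the methods above. A minimal sketch, assuming the 0.20 client API; the table, family and qualifier names are invented for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionClientSketch {
  public static void main(String[] args) throws Exception {
    HTable table = new HTable(new HBaseConfiguration(), "test_table");

    // put() -> HRegionInterface.put() on the hosting region server.
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("info"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
    table.put(put);

    // get() -> HRegionInterface.get().
    Result row = table.get(new Get(Bytes.toBytes("row1")));
    System.out.println(Bytes.toString(
        row.getValue(Bytes.toBytes("info"), Bytes.toBytes("qual"))));

    // incrementColumnValue() -> HRegionInterface.incrementColumnValue().
    table.incrementColumnValue(Bytes.toBytes("row1"),
        Bytes.toBytes("info"), Bytes.toBytes("counter"), 1);

    // A client-side scan maps onto openScanner()/next()/close().
    ResultScanner scanner = table.getScanner(new Scan());
    for (Result r : scanner) {
      System.out.println(r);
    }
    scanner.close();
    table.flushCommits();
  }
}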

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java Fri May  7 19:17:48 2010
@@ -35,7 +35,7 @@ import org.apache.hadoop.mapred.JobConf;
  * Example table column indexing class.  Runs a mapreduce job to index
  * specified table columns.
  * <ul><li>Each row is modeled as a Lucene document: row key is indexed in
- * its untokenized form, column name-value pairs are Lucene field name-value 
+ * its untokenized form, column name-value pairs are Lucene field name-value
  * pairs.</li>
  * <li>A file passed on command line is used to populate an
  * {@link IndexConfiguration} which is used to set various Lucene parameters,

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/Driver.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/Driver.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/Driver.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/Driver.java Fri May  7 19:17:48 2010
@@ -29,7 +29,7 @@ import org.apache.hadoop.util.ProgramDri
 public class Driver {
   /**
    * @param args
-   * @throws Throwable 
+   * @throws Throwable
    */
   public static void main(String[] args) throws Throwable {
     ProgramDriver pgd = new ProgramDriver();

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java Fri May  7 19:17:48 2010
@@ -44,12 +44,12 @@ extends MapReduceBase
 implements TableMap<ImmutableBytesWritable,RowResult> {
 
   /**
-   * JobConf parameter to specify the columns used to produce the key passed to 
+   * JobConf parameter to specify the columns used to produce the key passed to
    * collect from the map phase
    */
   public static final String GROUP_COLUMNS =
     "hbase.mapred.groupingtablemap.columns";
-  
+
   protected byte [][] m_columns;
 
   /**
@@ -64,9 +64,9 @@ implements TableMap<ImmutableBytesWritab
    * @param job job configuration object
    */
   @SuppressWarnings("unchecked")
-  public static void initJob(String table, String columns, String groupColumns, 
+  public static void initJob(String table, String columns, String groupColumns,
     Class<? extends TableMap> mapper, JobConf job) {
-    
+
     TableMapReduceUtil.initTableMapJob(table, columns, mapper,
         ImmutableBytesWritable.class, RowResult.class, job);
     job.set(GROUP_COLUMNS, groupColumns);
@@ -84,19 +84,19 @@ implements TableMap<ImmutableBytesWritab
 
   /**
    * Extract the grouping columns from value to construct a new key.
-   * 
+   *
    * Pass the new key and value to reduce.
    * If any of the grouping columns are not found in the value, the record is skipped.
-   * @param key 
-   * @param value 
-   * @param output 
-   * @param reporter 
-   * @throws IOException 
+   * @param key
+   * @param value
+   * @param output
+   * @param reporter
+   * @throws IOException
    */
-  public void map(ImmutableBytesWritable key, RowResult value, 
+  public void map(ImmutableBytesWritable key, RowResult value,
       OutputCollector<ImmutableBytesWritable,RowResult> output,
       Reporter reporter) throws IOException {
-    
+
     byte[][] keyVals = extractKeyValues(value);
     if(keyVals != null) {
       ImmutableBytesWritable tKey = createGroupKey(keyVals);
@@ -107,9 +107,9 @@ implements TableMap<ImmutableBytesWritab
   /**
    * Extract columns values from the current record. This method returns
    * null if any of the columns are not found.
-   * 
+   *
    * Override this method if you want to deal with nulls differently.
-   * 
+   *
    * @param r
    * @return array of byte values
    */
@@ -135,9 +135,9 @@ implements TableMap<ImmutableBytesWritab
   }
 
   /**
-   * Create a key by concatenating multiple column values. 
+   * Create a key by concatenating multiple column values.
    * Override this function in order to produce different types of keys.
-   * 
+   *
    * @param vals
    * @return key generated by concatenating multiple column values
    */

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java Fri May  7 19:17:48 2010
@@ -35,25 +35,25 @@ import org.apache.hadoop.mapred.Partitio
  * This is used to partition the output keys into groups of keys.
  * Keys are grouped according to the regions that currently exist
  * so that each reducer fills a single region so load is distributed.
- * 
+ *
  * @param <K2>
  * @param <V2>
  */
 @Deprecated
-public class HRegionPartitioner<K2,V2> 
+public class HRegionPartitioner<K2,V2>
 implements Partitioner<ImmutableBytesWritable, V2> {
   private final Log LOG = LogFactory.getLog(TableInputFormat.class);
   private HTable table;
-  private byte[][] startKeys; 
-  
+  private byte[][] startKeys;
+
   public void configure(JobConf job) {
     try {
-      this.table = new HTable(new HBaseConfiguration(job), 
+      this.table = new HTable(new HBaseConfiguration(job),
         job.get(TableOutputFormat.OUTPUT_TABLE));
     } catch (IOException e) {
       LOG.error(e);
     }
-    
+
     try {
       this.startKeys = this.table.getStartKeys();
     } catch (IOException e) {
@@ -79,7 +79,7 @@ implements Partitioner<ImmutableBytesWri
       if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){
         if (i >= numPartitions-1){
           // cover if we have less reduces then regions.
-          return (Integer.toString(i).hashCode() 
+          return (Integer.toString(i).hashCode()
               & Integer.MAX_VALUE) % numPartitions;
         }
         return i;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java Fri May  7 19:17:48 2010
@@ -44,7 +44,7 @@ implements TableMap<ImmutableBytesWritab
   /**
    * Use this before submitting a TableMap job. It will
    * appropriately set up the JobConf.
-   * 
+   *
    * @param table table name
    * @param columns columns to scan
    * @param mapper mapper class
@@ -60,17 +60,17 @@ implements TableMap<ImmutableBytesWritab
 
   /**
    * Pass the key, value to reduce
-   * @param key 
-   * @param value 
-   * @param output 
-   * @param reporter 
-   * @throws IOException 
+   * @param key
+   * @param value
+   * @param output
+   * @param reporter
+   * @throws IOException
    */
   public void map(ImmutableBytesWritable key, RowResult value,
       OutputCollector<ImmutableBytesWritable,RowResult> output,
       Reporter reporter) throws IOException {
-    
-    // convert 
+
+    // convert
     output.collect(key, value);
   }
 }

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java Fri May  7 19:17:48 2010
@@ -40,20 +40,20 @@ implements TableReduce<ImmutableBytesWri
   @SuppressWarnings("unused")
   private static final Log LOG =
     LogFactory.getLog(IdentityTableReduce.class.getName());
-  
+
   /**
    * No aggregation, output pairs of (key, record)
-   * @param key 
-   * @param values 
-   * @param output 
-   * @param reporter 
-   * @throws IOException 
+   * @param key
+   * @param values
+   * @param output
+   * @param reporter
+   * @throws IOException
    */
   public void reduce(ImmutableBytesWritable key, Iterator<BatchUpdate> values,
       OutputCollector<ImmutableBytesWritable, BatchUpdate> output,
       Reporter reporter)
       throws IOException {
-    
+
     while(values.hasNext()) {
       output.collect(key, values.next());
     }

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java Fri May  7 19:17:48 2010
@@ -105,7 +105,7 @@ public class IndexOutputFormat extends
       boolean closed;
       private long docCount = 0;
 
-      public void write(ImmutableBytesWritable key, 
+      public void write(ImmutableBytesWritable key,
           LuceneDocumentWrapper value)
       throws IOException {
         // unwrap and index doc

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java Fri May  7 19:17:48 2010
@@ -98,7 +98,7 @@ public class IndexTableReduce extends Ma
             Field.Index.NO;
 
         // UTF-8 encode value
-        Field field = new Field(column, Bytes.toString(columnValue), 
+        Field field = new Field(column, Bytes.toString(columnValue),
           store, index);
         field.setBoost(indexConf.getBoost(column));
         field.setOmitNorms(indexConf.isOmitNorms(column));

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java Fri May  7 19:17:48 2010
@@ -39,7 +39,7 @@ import org.apache.hadoop.util.ToolRunner
 
 /**
  * A job with a map to count rows.
- * Map outputs table rows IF the input row has columns that have content.  
+ * Map outputs table rows IF the input row has columns that have content.
  * Uses an {@link IdentityReducer}
  */
 @Deprecated
@@ -108,13 +108,13 @@ public class RowCounter extends Configur
     FileOutputFormat.setOutputPath(c, new Path(args[0]));
     return c;
   }
-  
+
   static int printUsage() {
     System.out.println(NAME +
       " <outputdir> <tablename> <column1> [<column2>...]");
     return -1;
   }
-  
+
   public int run(final String[] args) throws Exception {
     // Make sure there are at least 3 parameters
     if (args.length < 3) {

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java Fri May  7 19:17:48 2010
@@ -221,7 +221,7 @@ implements InputFormat<ImmutableBytesWri
       try {
         result = this.scanner.next();
       } catch (UnknownScannerException e) {
-        LOG.debug("recovered from " + StringUtils.stringifyException(e));  
+        LOG.debug("recovered from " + StringUtils.stringifyException(e));
         restart(lastRow);
         this.scanner.next();    // skip presumed already mapped row
         result = this.scanner.next();
@@ -299,7 +299,7 @@ implements InputFormat<ImmutableBytesWri
       int lastPos = startPos + middle;
       lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
       String regionLocation = table.getRegionLocation(startKeys[startPos]).
-        getServerAddress().getHostname(); 
+        getServerAddress().getHostname();
       splits[i] = new TableSplit(this.table.getTableName(),
         startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]:
           HConstants.EMPTY_START_ROW, regionLocation);

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java Fri May  7 19:17:48 2010
@@ -36,11 +36,11 @@ import org.apache.hadoop.mapred.JobConf;
 @Deprecated
 @SuppressWarnings("unchecked")
 public class TableMapReduceUtil {
-  
+
   /**
    * Use this before submitting a TableMap job. It will
    * appropriately set up the JobConf.
-   * 
+   *
    * @param table  The table name to read from.
    * @param columns  The columns to scan.
    * @param mapper  The mapper class to use.
@@ -49,10 +49,10 @@ public class TableMapReduceUtil {
    * @param job  The current job configuration to adjust.
    */
   public static void initTableMapJob(String table, String columns,
-    Class<? extends TableMap> mapper, 
-    Class<? extends WritableComparable> outputKeyClass, 
+    Class<? extends TableMap> mapper,
+    Class<? extends WritableComparable> outputKeyClass,
     Class<? extends Writable> outputValueClass, JobConf job) {
-      
+
     job.setInputFormat(TableInputFormat.class);
     job.setMapOutputValueClass(outputValueClass);
     job.setMapOutputKeyClass(outputKeyClass);
@@ -60,15 +60,15 @@ public class TableMapReduceUtil {
     FileInputFormat.addInputPaths(job, table);
     job.set(TableInputFormat.COLUMN_LIST, columns);
   }
-  
+
   /**
    * Use this before submitting a TableReduce job. It will
    * appropriately set up the JobConf.
-   * 
+   *
    * @param table  The output table.
    * @param reducer  The reducer class to use.
    * @param job  The current job configuration to adjust.
-   * @throws IOException When determining the region count fails. 
+   * @throws IOException When determining the region count fails.
    */
   public static void initTableReduceJob(String table,
     Class<? extends TableReduce> reducer, JobConf job)
@@ -79,13 +79,13 @@ public class TableMapReduceUtil {
   /**
    * Use this before submitting a TableReduce job. It will
    * appropriately set up the JobConf.
-   * 
+   *
    * @param table  The output table.
    * @param reducer  The reducer class to use.
    * @param job  The current job configuration to adjust.
-   * @param partitioner  Partitioner to use. Pass <code>null</code> to use 
+   * @param partitioner  Partitioner to use. Pass <code>null</code> to use
    * default partitioner.
-   * @throws IOException When determining the region count fails. 
+   * @throws IOException When determining the region count fails.
    */
   public static void initTableReduceJob(String table,
     Class<? extends TableReduce> reducer, JobConf job, Class partitioner)
@@ -106,17 +106,17 @@ public class TableMapReduceUtil {
       job.setPartitionerClass(partitioner);
     }
   }
-  
+
   /**
-   * Ensures that the given number of reduce tasks for the given job 
-   * configuration does not exceed the number of regions for the given table. 
-   * 
+   * Ensures that the given number of reduce tasks for the given job
+   * configuration does not exceed the number of regions for the given table.
+   *
    * @param table  The table to get the region count for.
    * @param job  The current job configuration to adjust.
    * @throws IOException When retrieving the table details fails.
    */
-  public static void limitNumReduceTasks(String table, JobConf job) 
-  throws IOException { 
+  public static void limitNumReduceTasks(String table, JobConf job)
+  throws IOException {
     HTable outputTable = new HTable(new HBaseConfiguration(job), table);
     int regions = outputTable.getRegionsInfo().size();
     if (job.getNumReduceTasks() > regions)
@@ -124,15 +124,15 @@ public class TableMapReduceUtil {
   }
 
   /**
-   * Ensures that the given number of map tasks for the given job 
-   * configuration does not exceed the number of regions for the given table. 
-   * 
+   * Ensures that the given number of map tasks for the given job
+   * configuration does not exceed the number of regions for the given table.
+   *
    * @param table  The table to get the region count for.
    * @param job  The current job configuration to adjust.
    * @throws IOException When retrieving the table details fails.
    */
-  public static void limitNumMapTasks(String table, JobConf job) 
-  throws IOException { 
+  public static void limitNumMapTasks(String table, JobConf job)
+  throws IOException {
     HTable outputTable = new HTable(new HBaseConfiguration(job), table);
     int regions = outputTable.getRegionsInfo().size();
     if (job.getNumMapTasks() > regions)
@@ -140,30 +140,30 @@ public class TableMapReduceUtil {
   }
 
   /**
-   * Sets the number of reduce tasks for the given job configuration to the 
-   * number of regions the given table has. 
-   * 
+   * Sets the number of reduce tasks for the given job configuration to the
+   * number of regions the given table has.
+   *
    * @param table  The table to get the region count for.
    * @param job  The current job configuration to adjust.
    * @throws IOException When retrieving the table details fails.
    */
-  public static void setNumReduceTasks(String table, JobConf job) 
-  throws IOException { 
+  public static void setNumReduceTasks(String table, JobConf job)
+  throws IOException {
     HTable outputTable = new HTable(new HBaseConfiguration(job), table);
     int regions = outputTable.getRegionsInfo().size();
     job.setNumReduceTasks(regions);
   }
-  
+
   /**
-   * Sets the number of map tasks for the given job configuration to the 
-   * number of regions the given table has. 
-   * 
+   * Sets the number of map tasks for the given job configuration to the
+   * number of regions the given table has.
+   *
    * @param table  The table to get the region count for.
    * @param job  The current job configuration to adjust.
    * @throws IOException When retrieving the table details fails.
    */
-  public static void setNumMapTasks(String table, JobConf job) 
-  throws IOException { 
+  public static void setNumMapTasks(String table, JobConf job)
+  throws IOException {
     HTable outputTable = new HTable(new HBaseConfiguration(job), table);
     int regions = outputTable.getRegionsInfo().size();
     job.setNumMapTasks(regions);
@@ -173,7 +173,7 @@ public class TableMapReduceUtil {
    * Sets the number of rows to return and cache with each scanner iteration.
    * Higher caching values will enable faster mapreduce jobs at the expense of
    * requiring more heap to contain the cached rows.
-   * 
+   *
    * @param job The current job configuration to adjust.
    * @param batchSize The number of rows to return in batch with each scanner
    * iteration.

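For illustration only (not part of this commit), a minimal driver-side sketch of how the TableMapReduceUtil helpers touched above are typically combined: point the reduce phase at an output table, then cap the reducer count at that table's current region count. The class name and the caller-supplied table/reducer are placeholders.

import java.io.IOException;

import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapred.TableReduce;
import org.apache.hadoop.mapred.JobConf;

public class ReduceJobSetup {
  /** Wires a reduce phase that writes into an existing HBase table. */
  public static JobConf configure(String table,
      Class<? extends TableReduce> reducer, JobConf job) throws IOException {
    // Sets up TableOutputFormat and the output table for the job.
    TableMapReduceUtil.initTableReduceJob(table, reducer, job);
    // Never run more reducers than the table currently has regions,
    // otherwise some reducers would have no region to fill.
    TableMapReduceUtil.limitNumReduceTasks(table, job);
    return job;
  }
}

Using limitNumReduceTasks() rather than setNumReduceTasks() keeps a smaller, explicitly configured reducer count intact and only lowers it when it exceeds the region count.
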
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java Fri May  7 19:17:48 2010
@@ -48,7 +48,7 @@ FileOutputFormat<ImmutableBytesWritable,
   private final Log LOG = LogFactory.getLog(TableOutputFormat.class);
 
   /**
-   * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) 
+   * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable)
    * and write to an HBase table
    */
   protected static class TableRecordWriter
@@ -57,14 +57,14 @@ FileOutputFormat<ImmutableBytesWritable,
 
     /**
      * Instantiate a TableRecordWriter with the HBase HClient for writing.
-     * 
+     *
      * @param table
      */
     public TableRecordWriter(HTable table) {
       m_table = table;
     }
 
-    public void close(Reporter reporter) 
+    public void close(Reporter reporter)
       throws IOException {
       m_table.flushCommits();
     }
@@ -74,14 +74,14 @@ FileOutputFormat<ImmutableBytesWritable,
       m_table.commit(new BatchUpdate(value));
     }
   }
-  
+
   @Override
   @SuppressWarnings("unchecked")
   public RecordWriter getRecordWriter(FileSystem ignored,
       JobConf job, String name, Progressable progress) throws IOException {
-    
+
     // expecting exactly one path
-    
+
     String tableName = job.get(OUTPUT_TABLE);
     HTable table = null;
     try {
@@ -97,7 +97,7 @@ FileOutputFormat<ImmutableBytesWritable,
   @Override
   public void checkOutputSpecs(FileSystem ignored, JobConf job)
   throws FileAlreadyExistsException, InvalidJobConfException, IOException {
-    
+
     String tableName = job.get(OUTPUT_TABLE);
     if(tableName == null) {
       throw new IOException("Must specify table name");

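A rough sketch (not part of this commit) of the manual JobConf settings that correspond to this output format; it assumes OUTPUT_TABLE is the publicly visible property key consulted by checkOutputSpecs(), and the class and table names are placeholders.

import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class OutputTableSetup {
  /** Points a job's output at an HBase table instead of a filesystem path. */
  public static void configure(JobConf job, String table) {
    // checkOutputSpecs() rejects the job early when the table name is missing.
    job.set(TableOutputFormat.OUTPUT_TABLE, table);
    job.setOutputFormat(TableOutputFormat.class);
    // TableRecordWriter commits each (row key, BatchUpdate) pair to the table
    // and flushes any buffered commits when the writer is closed.
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(BatchUpdate.class);
  }
}
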
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/package-info.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/package-info.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/package-info.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/package-info.java Fri May  7 19:17:48 2010
@@ -99,7 +99,7 @@ below. If running the reduce step makes 
 to have lots of reducers so load is spread across the hbase cluster.</p>
 
 <p>There is also a new hbase partitioner that will run as many reducers as
-currently existing regions.  The 
+currently existing regions.  The
 {@link org.apache.hadoop.hbase.mapred.HRegionPartitioner} is suitable
 when your table is large and your upload is not such that it will greatly
 alter the number of existing regions when done; otherwise use the default
@@ -133,7 +133,7 @@ Read the class comment for specification
 <h3>Example to bulk import/load a text file into an HTable
 </h3>
 
-<p>Here's a sample program from 
+<p>Here's a sample program from
 <a href="http://www.spicylogic.com/allenday/blog/category/computing/distributed-systems/hadoop/hbase/">Allen Day</a>
 that takes an HDFS text file path and an HBase table name as inputs, and loads the contents of the text file to the table
 all up in the map phase.
@@ -180,12 +180,12 @@ public class BulkImport implements Tool 
     throws IOException {
       if ( table == null )
         throw new IOException("table is null");
-      
+
       // Split input line on tab character
       String [] splits = value.toString().split("\t");
       if ( splits.length != 4 )
         return;
-      
+
       String rowID = splits[0];
       int timestamp  = Integer.parseInt( splits[1] );
       String colID = splits[2];
@@ -198,8 +198,8 @@ public class BulkImport implements Tool 
       if ( timestamp > 0 )
         bu.setTimestamp( timestamp );
 
-      bu.put(colID, cellValue.getBytes());      
-      table.commit( bu );      
+      bu.put(colID, cellValue.getBytes());
+      table.commit( bu );
     }
 
     public void configure(JobConf job) {
@@ -212,7 +212,7 @@ public class BulkImport implements Tool 
       }
     }
   }
-  
+
   public JobConf createSubmittableJob(String[] args) {
     JobConf c = new JobConf(getConf(), BulkImport.class);
     c.setJobName(NAME);
@@ -224,7 +224,7 @@ public class BulkImport implements Tool 
     c.setOutputFormat(NullOutputFormat.class);
     return c;
   }
-  
+
   static int printUsage() {
     System.err.println("Usage: " + NAME + " &lt;input> &lt;table_name>");
     System.err.println("\twhere &lt;input> is a tab-delimited text file with 4 columns.");
@@ -233,7 +233,7 @@ public class BulkImport implements Tool 
     System.err.println("\t\tcolumn 3 = column ID");
     System.err.println("\t\tcolumn 4 = cell value");
     return -1;
-  } 
+  }
 
   public int run(@SuppressWarnings("unused") String[] args) throws Exception {
     // Make sure there are exactly 3 parameters left.
@@ -246,7 +246,7 @@ public class BulkImport implements Tool 
 
   public Configuration getConf() {
     return this.conf;
-  } 
+  }
 
   public void setConf(final Configuration c) {
     this.conf = c;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java Fri May  7 19:17:48 2010
@@ -36,7 +36,7 @@ import org.apache.hadoop.util.GenericOpt
  * Example table column indexing class.  Runs a mapreduce job to index
  * specified table columns.
  * <ul><li>Each row is modeled as a Lucene document: row key is indexed in
- * its untokenized form, column name-value pairs are Lucene field name-value 
+ * its untokenized form, column name-value pairs are Lucene field name-value
  * pairs.</li>
  * <li>A file passed on command line is used to populate an
  * {@link IndexConfiguration} which is used to set various Lucene parameters,
@@ -60,7 +60,7 @@ public class BuildTableIndex {
 
   /**
    * Prints the usage message and exits the program.
-   * 
+   *
    * @param message  The message to print first.
    */
   private static void printUsage(String message) {
@@ -71,12 +71,12 @@ public class BuildTableIndex {
 
   /**
    * Creates a new job.
-   * @param conf 
-   * 
+   * @param conf
+   *
    * @param args  The command line arguments.
    * @throws IOException When reading the configuration fails.
    */
-  public static Job createSubmittableJob(Configuration conf, String[] args) 
+  public static Job createSubmittableJob(Configuration conf, String[] args)
   throws IOException {
     if (args.length < 6) {
       printUsage("Too few arguments");
@@ -129,7 +129,7 @@ public class BuildTableIndex {
     Scan scan = new Scan();
     scan.addColumns(columnNames.toString());
     // use identity map (a waste, but just as an example)
-    IdentityTableMapper.initJob(tableName, scan, 
+    IdentityTableMapper.initJob(tableName, scan,
       IdentityTableMapper.class, job);
     // use IndexTableReduce to build a Lucene index
     job.setReducerClass(IndexTableReducer.class);
@@ -142,7 +142,7 @@ public class BuildTableIndex {
    * Reads xml file of indexing configurations.  The xml format is similar to
    * hbase-default.xml and hadoop-default.xml. For an example configuration,
    * see the <code>createIndexConfContent</code> method in TestTableIndex.
-   * 
+   *
    * @param fileName  The file to read.
    * @return XML configuration read from file.
    * @throws IOException When the XML is broken.
@@ -177,16 +177,16 @@ public class BuildTableIndex {
 
   /**
    * The main entry point.
-   * 
+   *
    * @param args  The command line arguments.
    * @throws Exception When running the job fails.
    */
   public static void main(String[] args) throws Exception {
     HBaseConfiguration conf = new HBaseConfiguration();
-    String[] otherArgs = 
+    String[] otherArgs =
       new GenericOptionsParser(conf, args).getRemainingArgs();
     Job job = createSubmittableJob(conf, otherArgs);
     System.exit(job.waitForCompletion(true) ? 0 : 1);
   }
-  
+
 }
\ No newline at end of file

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java Fri May  7 19:17:48 2010
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.migration
 public class Driver {
   /**
    * @param args
-   * @throws Throwable 
+   * @throws Throwable
    */
   public static void main(String[] args) throws Throwable {
     ProgramDriver pgd = new ProgramDriver();

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Export.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Export.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Export.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Export.java Fri May  7 19:17:48 2010
@@ -51,7 +51,7 @@ public class Export {
      * @param value  The columns.
      * @param context  The current context.
      * @throws IOException When something is broken with the data.
-     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, 
+     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
      *   org.apache.hadoop.mapreduce.Mapper.Context)
      */
     @Override
@@ -68,7 +68,7 @@ public class Export {
 
   /**
    * Sets up the actual job.
-   * 
+   *
    * @param conf  The current configuration.
    * @param args  The command line parameters.
    * @return The newly created job.
@@ -114,7 +114,7 @@ public class Export {
 
   /**
    * Main entry point.
-   * 
+   *
    * @param args  The command line parameters.
    * @throws Exception When running the job fails.
    */

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java Fri May  7 19:17:48 2010
@@ -40,31 +40,31 @@ public class GroupingTableMapper
 extends TableMapper<ImmutableBytesWritable,Result> implements Configurable {
 
   /**
-   * JobConf parameter to specify the columns used to produce the key passed to 
+   * JobConf parameter to specify the columns used to produce the key passed to
    * collect from the map phase.
    */
   public static final String GROUP_COLUMNS =
     "hbase.mapred.groupingtablemap.columns";
-  
+
   /** The grouping columns. */
   protected byte [][] columns;
   /** The current configuration. */
   private Configuration conf = null;
-  
+
   /**
-   * Use this before submitting a TableMap job. It will appropriately set up 
+   * Use this before submitting a TableMap job. It will appropriately set up
    * the job.
    *
    * @param table The table to be processed.
    * @param scan  The scan with the columns etc.
-   * @param groupColumns  A space separated list of columns used to form the 
+   * @param groupColumns  A space separated list of columns used to form the
    * key used in collect.
    * @param mapper  The mapper class.
    * @param job  The current job.
    * @throws IOException When setting up the job fails.
    */
   @SuppressWarnings("unchecked")
-  public static void initJob(String table, Scan scan, String groupColumns, 
+  public static void initJob(String table, Scan scan, String groupColumns,
     Class<? extends TableMapper> mapper, Job job) throws IOException {
     TableMapReduceUtil.initTableMapperJob(table, scan, mapper,
         ImmutableBytesWritable.class, Result.class, job);
@@ -72,18 +72,18 @@ extends TableMapper<ImmutableBytesWritab
   }
 
   /**
-   * Extract the grouping columns from value to construct a new key. Pass the 
-   * new key and value to reduce. If any of the grouping columns are not found 
+   * Extract the grouping columns from value to construct a new key. Pass the
+   * new key and value to reduce. If any of the grouping columns are not found
    * in the value, the record is skipped.
-   * 
-   * @param key  The current key. 
+   *
+   * @param key  The current key.
    * @param value  The current value.
-   * @param context  The current context. 
+   * @param context  The current context.
    * @throws IOException When writing the record fails.
    * @throws InterruptedException When the job is aborted.
    */
   @Override
-  public void map(ImmutableBytesWritable key, Result value, Context context) 
+  public void map(ImmutableBytesWritable key, Result value, Context context)
   throws IOException, InterruptedException {
     byte[][] keyVals = extractKeyValues(value);
     if(keyVals != null) {
@@ -97,7 +97,7 @@ extends TableMapper<ImmutableBytesWritab
    * null if any of the columns are not found.
    * <p>
    * Override this method if you want to deal with nulls differently.
-   * 
+   *
    * @param r  The current values.
    * @return Array of byte values.
    */
@@ -124,9 +124,9 @@ extends TableMapper<ImmutableBytesWritab
 
   /**
    * Create a key by concatenating multiple column values.
-   * <p> 
+   * <p>
    * Override this function in order to produce different types of keys.
-   * 
+   *
    * @param vals  The current key/values.
    * @return A key generated by concatenating multiple column values.
    */
@@ -150,7 +150,7 @@ extends TableMapper<ImmutableBytesWritab
 
   /**
    * Returns the current configuration.
-   *  
+   *
    * @return The current configuration.
    * @see org.apache.hadoop.conf.Configurable#getConf()
    */
@@ -161,7 +161,7 @@ extends TableMapper<ImmutableBytesWritab
 
   /**
    * Sets the configuration. This is used to set up the grouping details.
-   * 
+   *
    * @param configuration  The configuration to set.
    * @see org.apache.hadoop.conf.Configurable#setConf(
    *   org.apache.hadoop.conf.Configuration)
@@ -175,5 +175,5 @@ extends TableMapper<ImmutableBytesWritab
       columns[i] = Bytes.toBytes(cols[i]);
     }
   }
-  
+
 }

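A short sketch (not part of this commit) of wiring the grouping mapper above into a job through initJob(); the table name, column names, and surrounding job are illustrative placeholders.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.GroupingTableMapper;
import org.apache.hadoop.mapreduce.Job;

public class GroupedScanSetup {
  /** Keys map output on two columns instead of the row key. */
  public static void configure(Job job) throws IOException {
    Scan scan = new Scan();
    // Scan only the columns that take part in the grouping.
    scan.addColumns("info:author info:category");
    // Rows missing either grouping column are skipped by the mapper; the rest
    // are re-keyed on the concatenated "author category" values.
    GroupingTableMapper.initJob("articles", scan, "info:author info:category",
        GroupingTableMapper.class, job);
  }
}
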
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Fri May  7 19:17:48 2010
@@ -50,7 +50,7 @@ import org.mortbay.log.Log;
 public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue> {
   public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(TaskAttemptContext context)
   throws IOException, InterruptedException {
-    // Get the path of the temporary output file 
+    // Get the path of the temporary output file
     final Path outputPath = FileOutputFormat.getOutputPath(context);
     final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
     Configuration conf = context.getConfiguration();
@@ -127,7 +127,7 @@ public class HFileOutputFormat extends F
   }
 
   /*
-   * Data structure to hold a Writer and amount of data written on it. 
+   * Data structure to hold a Writer and amount of data written on it.
    */
   static class WriterLength {
     long written = 0;

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java Fri May  7 19:17:48 2010
@@ -35,23 +35,23 @@ import org.apache.hadoop.mapreduce.Parti
  * This is used to partition the output keys into groups of keys.
  * Keys are grouped according to the regions that currently exist
  * so that each reducer fills a single region and load is distributed.
- * 
+ *
  * @param <KEY>  The type of the key.
  * @param <VALUE>  The type of the value.
  */
-public class HRegionPartitioner<KEY, VALUE> 
+public class HRegionPartitioner<KEY, VALUE>
 extends Partitioner<ImmutableBytesWritable, VALUE>
 implements Configurable {
-  
+
   private final Log LOG = LogFactory.getLog(TableInputFormat.class);
   private Configuration conf = null;
   private HTable table;
-  private byte[][] startKeys; 
-  
+  private byte[][] startKeys;
+
   /**
-   * Gets the partition number for a given key (hence record) given the total 
+   * Gets the partition number for a given key (hence record) given the total
    * number of partitions i.e. number of reduce-tasks for the job.
-   *   
+   *
    * <p>Typically a hash function on all or a subset of the key.</p>
    *
    * @param key  The key to be partitioned.
@@ -80,7 +80,7 @@ implements Configurable {
       if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){
         if (i >= numPartitions-1){
           // cover the case where we have fewer reducers than regions.
-          return (Integer.toString(i).hashCode() 
+          return (Integer.toString(i).hashCode()
               & Integer.MAX_VALUE) % numPartitions;
         }
         return i;
@@ -92,7 +92,7 @@ implements Configurable {
 
   /**
    * Returns the current configuration.
-   *  
+   *
    * @return The current configuration.
    * @see org.apache.hadoop.conf.Configurable#getConf()
    */
@@ -104,7 +104,7 @@ implements Configurable {
   /**
    * Sets the configuration. This is used to determine the start keys for the
    * given table.
-   * 
+   *
    * @param configuration  The configuration to set.
    * @see org.apache.hadoop.conf.Configurable#setConf(
    *   org.apache.hadoop.conf.Configuration)
@@ -113,7 +113,7 @@ implements Configurable {
   public void setConf(Configuration configuration) {
     this.conf = configuration;
     try {
-      this.table = new HTable(new HBaseConfiguration(conf), 
+      this.table = new HTable(new HBaseConfiguration(conf),
         configuration.get(TableOutputFormat.OUTPUT_TABLE));
     } catch (IOException e) {
       LOG.error(e);

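To make the wrap-around in getPartition() above concrete, here is a small self-contained sketch (not part of this commit) of the same arithmetic for the case where a job runs fewer reducers than the table has regions.

public class PartitionMath {
  /** Mirrors the region-index to reducer mapping used by HRegionPartitioner. */
  static int partitionFor(int regionIndex, int numPartitions) {
    if (regionIndex >= numPartitions - 1) {
      // The mask keeps the hash non-negative; the modulo folds overflowing
      // region indexes back into the available reducer range.
      return (Integer.toString(regionIndex).hashCode() & Integer.MAX_VALUE)
          % numPartitions;
    }
    return regionIndex;
  }

  public static void main(String[] args) {
    // Example: a table with 10 regions written by only 4 reducers. Regions
    // 0-2 keep their index; regions 3-9 are spread deterministically.
    for (int region = 0; region < 10; region++) {
      System.out.println("region " + region + " -> reducer "
          + partitionFor(region, 4));
    }
  }
}
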
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java Fri May  7 19:17:48 2010
@@ -33,9 +33,9 @@ public class IdentityTableMapper
 extends TableMapper<ImmutableBytesWritable, Result> {
 
   /**
-   * Use this before submitting a TableMap job. It will appropriately set up 
+   * Use this before submitting a TableMap job. It will appropriately set up
    * the job.
-   * 
+   *
    * @param table  The table name.
    * @param scan  The scan with the columns to scan.
    * @param mapper  The mapper class.
@@ -51,16 +51,16 @@ extends TableMapper<ImmutableBytesWritab
 
   /**
    * Pass the key, value to reduce.
-   * 
-   * @param key  The current key. 
+   *
+   * @param key  The current key.
    * @param value  The current value.
-   * @param context  The current context. 
+   * @param context  The current context.
    * @throws IOException When writing the record fails.
    * @throws InterruptedException When the job is aborted.
    */
-  public void map(ImmutableBytesWritable key, Result value, Context context) 
+  public void map(ImmutableBytesWritable key, Result value, Context context)
   throws IOException, InterruptedException {
     context.write(key, value);
   }
-  
+
 }

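A minimal sketch (not part of this commit) of setting up the pass-through mapper above, mirroring the call BuildTableIndex already makes earlier in this commit; the table and column names are placeholders.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.mapreduce.Job;

public class PassThroughScanSetup {
  /** Feeds every scanned row unchanged into the reduce phase. */
  public static void configure(Job job) throws IOException {
    Scan scan = new Scan();
    scan.addColumns("contents:raw");
    IdentityTableMapper.initJob("webpages", scan, IdentityTableMapper.class, job);
  }
}
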
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java Fri May  7 19:17:48 2010
@@ -27,44 +27,44 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.OutputFormat;
 
 /**
- * Convenience class that simply writes all values (which must be 
- * {@link org.apache.hadoop.hbase.client.Put Put} or 
+ * Convenience class that simply writes all values (which must be
+ * {@link org.apache.hadoop.hbase.client.Put Put} or
  * {@link org.apache.hadoop.hbase.client.Delete Delete} instances)
- * passed to it out to the configured HBase table. This works in combination 
+ * passed to it out to the configured HBase table. This works in combination
  * with {@link TableOutputFormat} which actually does the writing to HBase.<p>
- *  
+ *
  * Keys are passed along but ignored in TableOutputFormat.  However, they can
  * be used to control how your values will be divided up amongst the specified
  * number of reducers. <p>
- * 
- * You can also use the {@link TableMapReduceUtil} class to set up the two 
+ *
+ * You can also use the {@link TableMapReduceUtil} class to set up the two
  * classes in one step:
  * <blockquote><code>
  * TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job);
  * </code></blockquote>
  * This will also set the proper {@link TableOutputFormat} which is given the
- * <code>table</code> parameter. The 
- * {@link org.apache.hadoop.hbase.client.Put Put} or 
+ * <code>table</code> parameter. The
+ * {@link org.apache.hadoop.hbase.client.Put Put} or
  * {@link org.apache.hadoop.hbase.client.Delete Delete} define the
  * row and columns implicitly.
  */
-public class IdentityTableReducer 
+public class IdentityTableReducer
 extends TableReducer<Writable, Writable, Writable> {
 
   @SuppressWarnings("unused")
   private static final Log LOG = LogFactory.getLog(IdentityTableReducer.class);
-  
+
   /**
-   * Writes each given record, consisting of the row key and the given values, 
-   * to the configured {@link OutputFormat}. It is emitting the row key and each 
-   * {@link org.apache.hadoop.hbase.client.Put Put} or 
-   * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. 
-   * 
-   * @param key  The current row key. 
-   * @param values  The {@link org.apache.hadoop.hbase.client.Put Put} or 
-   *   {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given 
+   * Writes each given record, consisting of the row key and the given values,
+   * to the configured {@link OutputFormat}. It is emitting the row key and each
+   * {@link org.apache.hadoop.hbase.client.Put Put} or
+   * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
+   *
+   * @param key  The current row key.
+   * @param values  The {@link org.apache.hadoop.hbase.client.Put Put} or
+   *   {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given
    *   row.
-   * @param context  The context of the reduce. 
+   * @param context  The context of the reduce.
    * @throws IOException When writing the record fails.
    * @throws InterruptedException When the job gets interrupted.
    */

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Import.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Import.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Import.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Import.java Fri May  7 19:17:48 2010
@@ -49,7 +49,7 @@ public class Import {
      * @param value  The columns.
      * @param context  The current context.
      * @throws IOException When something is broken with the data.
-     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, 
+     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
      *   org.apache.hadoop.mapreduce.Mapper.Context)
      */
     @Override
@@ -63,7 +63,7 @@ public class Import {
       }
     }
 
-    private static Put resultToPut(ImmutableBytesWritable key, Result result) 
+    private static Put resultToPut(ImmutableBytesWritable key, Result result)
     throws IOException {
       Put put = new Put(key.get());
       for (KeyValue kv : result.raw()) {
@@ -75,13 +75,13 @@ public class Import {
 
   /**
    * Sets up the actual job.
-   * 
+   *
    * @param conf  The current configuration.
    * @param args  The command line parameters.
    * @return The newly created job.
    * @throws IOException When setting up the job fails.
    */
-  public static Job createSubmittableJob(Configuration conf, String[] args) 
+  public static Job createSubmittableJob(Configuration conf, String[] args)
   throws IOException {
     String tableName = args[0];
     Path inputDir = new Path(args[1]);
@@ -109,7 +109,7 @@ public class Import {
 
   /**
    * Main entry point.
-   * 
+   *
    * @param args  The command line parameters.
    * @throws Exception When running the job fails.
    */

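A small driver sketch (not part of this commit) for the Import job above: per createSubmittableJob(), args[0] names the target table and args[1] the input directory; the concrete table name and path are placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.Import;
import org.apache.hadoop.mapreduce.Job;

public class RunImport {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    // Re-applies each previously exported row as a Put against the target
    // table (see resultToPut() above).
    Job job = Import.createSubmittableJob(
        conf, new String[] { "mytable", "/user/hbase/export/mytable" });
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
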
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java Fri May  7 19:17:48 2010
@@ -47,7 +47,7 @@ import org.w3c.dom.Text;
  * Configuration parameters for building a Lucene index.
  */
 public class IndexConfiguration extends Configuration {
-  
+
   private static final Log LOG = LogFactory.getLog(IndexConfiguration.class);
 
   static final String HBASE_COLUMN_NAME = "hbase.column.name";

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java Fri May  7 19:17:48 2010
@@ -38,9 +38,9 @@ import org.apache.lucene.search.Similari
  * Create a local index, unwrap Lucene documents created by reduce, add them to
  * the index, and copy the index to the destination.
  */
-public class IndexOutputFormat 
+public class IndexOutputFormat
 extends FileOutputFormat<ImmutableBytesWritable, LuceneDocumentWrapper> {
-  
+
   static final Log LOG = LogFactory.getLog(IndexOutputFormat.class);
 
   /** Random generator. */
@@ -48,7 +48,7 @@ extends FileOutputFormat<ImmutableBytesW
 
   /**
    * Returns the record writer.
-   * 
+   *
    * @param context  The current task context.
    * @return The record writer.
    * @throws IOException When there is an issue with the writer.
@@ -59,7 +59,7 @@ extends FileOutputFormat<ImmutableBytesW
     getRecordWriter(TaskAttemptContext context)
   throws IOException {
 
-    final Path perm = new Path(FileOutputFormat.getOutputPath(context), 
+    final Path perm = new Path(FileOutputFormat.getOutputPath(context),
       FileOutputFormat.getUniqueFile(context, "part", ""));
     // null for "dirsProp" means no predefined directories
     final Path temp = context.getConfiguration().getLocalPath(
@@ -109,5 +109,5 @@ extends FileOutputFormat<ImmutableBytesW
     writer.setUseCompoundFile(indexConf.isUseCompoundFile());
     return new IndexRecordWriter(context, fs, writer, indexConf, perm, temp);
   }
-  
+
 }