Posted to commits@hbase.apache.org by li...@apache.org on 2014/03/12 22:17:20 UTC

svn commit: r1576909 [8/18] - in /hbase/branches/0.89-fb/src: ./ examples/thrift/ main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/avro/ main/java/org/apache/hadoop/hbase/avro/generated/ main/java/org/apache/hadoop/hbase/client/ mai...

Copied: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftClientInterface.java (from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java)
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftClientInterface.java?p2=hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftClientInterface.java&p1=hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java&r1=1576907&r2=1576909&rev=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftClientInterface.java Wed Mar 12 21:17:13 2014
@@ -1,5 +1,5 @@
-/**
- * Copyright 2010 The Apache Software Foundation
+/*
+ * Copyright The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -17,18 +17,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
+package org.apache.hadoop.hbase.ipc;
 
 /**
- * This exception is thrown by the master when a region server reports and is
- * already being processed as dead. This can happen when a region server loses
- * its session but didn't figure it yet.
+ * TODO: documentation
+ *
+ * For now this is empty; we may want to extract the common methods from the
+ * interfaces that will extend this interface.
+ *
  */
-public class YouAreDeadException extends IOException {
+public interface ThriftClientInterface extends AutoCloseable {
 
-  public YouAreDeadException(String message) {
-    super(message);
-  }
 }

Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftHRegionInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftHRegionInterface.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftHRegionInterface.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftHRegionInterface.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,636 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.MultiAction;
+import org.apache.hadoop.hbase.client.MultiPut;
+import org.apache.hadoop.hbase.client.MultiPutResponse;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TMultiResponse;
+import org.apache.hadoop.hbase.client.TRowMutations;
+import org.apache.hadoop.hbase.io.hfile.histogram.HFileHistogram.Bucket;
+import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;
+import org.apache.hadoop.hbase.master.AssignmentPlan;
+
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.service.ThriftException;
+import com.facebook.swift.service.ThriftMethod;
+import com.facebook.swift.service.ThriftService;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Clients interact with ThriftHRegionServers using a handle to the
+ * {@link ThriftHRegionInterface}.
+ *
+ * This interface just captures the Swift version of the methods in
+ * {@link HRegionInterface}.
+ *
+ */
+@ThriftService
+public interface ThriftHRegionInterface extends ThriftClientInterface {
+  /**
+   * Get metainfo about an HRegion
+   *
+   * @param regionName name of the region
+   * @return HRegionInfo object for region
+   * @throws ThriftHBaseException
+   */
+  @ThriftMethod(value = "getRegionInfo", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public HRegionInfo getRegionInfo(
+      @ThriftField(name="regionName") final byte[] regionName)
+      throws ThriftHBaseException;
+
+  /**
+   * Return all the data for the row that matches <i>row</i> exactly,
+   * or the one that immediately precedes it.
+   *
+   * @param regionName region name
+   * @param row row key
+   * @param family Column family to look for row in.
+   * @return map of values
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "getClosestRowBefore", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public Result getClosestRowBefore(
+      @ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="row") final byte[] row,
+      @ThriftField(name="family") final byte[] family) throws ThriftHBaseException;
+
+
+  @ThriftMethod(value = "getClosestRowBeforeAsync", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1)})
+  public ListenableFuture<Result> getClosestRowBeforeAsync(
+      @ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="row") final byte[] row,
+      @ThriftField(name="family") final byte[] family);
+
+  /**
+   * Flush the given region
+   */
+  @ThriftMethod(value = "flushRegion", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1)})
+  public void flushRegion(@ThriftField(name="regionName") byte[] regionName)
+      throws ThriftHBaseException;
+
+  /**
+   * Flush the given region if lastFlushTime < ifOlderThanTS
+   */
+  @ThriftMethod(value = "flushRegionIfOlderThanTS", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1)})
+  public void flushRegion(@ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="ifOlderThanTS") long ifOlderThanTS)
+      throws ThriftHBaseException;
+
+  /**
+   * Gets the last flush time (in milliseconds) for the given region
+   * @return the last flush time for a region
+   */
+  @ThriftMethod("getLastFlushTime")
+  public long getLastFlushTime(@ThriftField(name="regionName") byte[] regionName);
+
+  /**
+   * Gets the last flush time (in milliseconds) for all regions on the server
+   * @return a map of regionName to the last flush time for the region
+   */
+  @ThriftMethod(value = "getLastFlushTimes")
+  public Map<byte[], Long> getLastFlushTimes();
+
+  /**
+   * Gets the current time (in milliseconds) at the region server
+   * @return time in milliseconds at the region server.
+   */
+  @ThriftMethod(value = "getCurrentTimeMillis")
+  public long getCurrentTimeMillis();
+
+  /**
+   * Gets the current startCode at the region server
+   * @return startCode -- time in milliseconds when the region server started.
+   */
+  @ThriftMethod(value = "getStartCode")
+  public long getStartCode();
+
+  /**
+   * Get a list of store files for a particular CF in a particular region
+   * @param regionName region name
+   * @param columnFamily column family name
+   * @return the list of store files
+   */
+  @ThriftMethod(value = "getStoreFileList", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1)})
+  public List<String> getStoreFileList(
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="columnFamily") byte[] columnFamily)
+      throws ThriftHBaseException;
+
+  /**
+   * Get a list of store files for a set of CFs in a particular region
+   * @param regionName region name
+   * @param columnFamilies column family names
+   * @return the list of store files
+   */
+  @ThriftMethod(value = "getStoreFileListForColumnFamilies", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<String> getStoreFileListForColumnFamilies(
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="columnFamilies") List<byte[]> columnFamilies)
+      throws ThriftHBaseException;
+
+  /**
+   * Get a list of store files for all CFs in a particular region
+   * @param regionName region name
+   * @return the list of store files
+   */
+  @ThriftMethod(value = "getStoreFileListForAllColumnFamilies", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<String> getStoreFileListForAllColumnFamilies(
+      @ThriftField(name="regionName") byte[] regionName)
+      throws ThriftHBaseException;
+
+  /**
+   * @param rollCurrentHLog if true, the current HLog is rolled and will be
+   *     included in the list returned
+   * @return list of HLog files
+   */
+  @ThriftMethod(value = "getHLogsList",  exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<String> getHLogsList(
+      @ThriftField(name="rollCurrentHLog") boolean rollCurrentHLog)
+      throws ThriftHBaseException;
+
+  /**
+   * TODO: deprecate this
+   * Perform Get operation.
+   * @param regionName name of region to get from
+   * @param get Get operation
+   * @return Result
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "processGet", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public Result get(@ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="get") Get get)
+      throws ThriftHBaseException;
+
+  @ThriftMethod(value = "getAsync", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1)})
+  public ListenableFuture<Result> getAsync(
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="get") Get get);
+
+  @ThriftMethod(value = "getRows", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<Result> getRows(@ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="gets") List<Get> gets)
+      throws ThriftHBaseException;
+
+  /**
+   * Perform exists operation.
+   * @param regionName name of region to get from
+   * @param get Get operation describing cell to test
+   * @return true if exists
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "exists", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public boolean exists(@ThriftField(name="regionName") byte [] regionName,
+      @ThriftField(name="get") Get get)
+      throws ThriftHBaseException;
+
+  /**
+   * Put data into the specified region
+   * @param regionName region name
+   * @param put the data to be put
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "processPut", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void put(@ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="put") final Put put)
+      throws ThriftHBaseException;
+
+  /**
+   * Put an array of puts into the specified region
+   *
+   * @param regionName region name
+   * @param puts List of puts to execute
+   * @return The number of processed puts.  Returns -1 if all Puts were
+   * processed successfully.
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "putRows", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public int putRows(@ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="puts") final List<Put> puts)
+      throws ThriftHBaseException;
+
+  /**
+   * Deletes all the KeyValues that match those found in the Delete object,
+   * if their timestamp is <= that of the Delete. In the case of a delete
+   * with a specific timestamp, it only deletes that specific KeyValue.
+   * @param regionName region name
+   * @param delete delete object
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "processDelete", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void processDelete(@ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="deleteArg") final Delete delete)
+      throws ThriftHBaseException;
+
+  @ThriftMethod(value = "deleteAsync", exception =  {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public ListenableFuture<Void> deleteAsync(@ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="deleteArg") final Delete delete);
+
+  /**
+   * Put an array of deletes into the specified region
+   *
+   * @param regionName region name
+   * @param deletes delete List to execute
+   * @return The number of processed deletes.  Returns -1 if all Deletes were
+   * processed successfully.
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "processListOfDeletes", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public int processListOfDeletes(
+      @ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="deletes") final List<Delete> deletes)
+      throws ThriftHBaseException;
+
+  /**
+   * Atomically checks if a row/family/qualifier value matches the expected value.
+   * If it does, it adds the put. If the passed expected value is null, then the
+   * check is for non-existence of the row/column.
+   *
+   * @param regionName region name
+   * @param row row to check
+   * @param family column family
+   * @param qualifier column qualifier
+   * @param value the expected value
+   * @param put data to put if check succeeds
+   * @throws ThriftHBaseException e
+   * @return true if the new put was executed, false otherwise
+   */
+  @ThriftMethod(value = "checkAndPut", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public boolean checkAndPut(
+      @ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="row") final byte[] row,
+      @ThriftField(name="family") final byte[] family,
+      @ThriftField(name="qualifier") final byte[] qualifier,
+      @ThriftField(name="value") final byte[] value,
+      @ThriftField(name="put") final Put put)
+      throws ThriftHBaseException;
+
+
+  /**
+   * Atomically checks if a row/family/qualifier value matches the expected value.
+   * If it does, it adds the delete. If the passed expected value is null, then the
+   * check is for non-existence of the row/column.
+   *
+   * @param regionName region name
+   * @param row row to check
+   * @param family column family
+   * @param qualifier column qualifier
+   * @param value the expected value
+   * @param delete data to delete if check succeeds
+   * @throws ThriftHBaseException e
+   * @return true if the new delete was executed, false otherwise
+   */
+  @ThriftMethod(value = "checkAndDelete", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public boolean checkAndDelete(
+      @ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="row") final byte[] row,
+      @ThriftField(name="family") final byte[] family,
+      @ThriftField(name="qualifier") final byte[] qualifier,
+      @ThriftField(name="value") final byte[] value,
+      @ThriftField(name="deleteArg") final Delete delete)
+      throws ThriftHBaseException;
+
+  /**
+   * Atomically increments a column value. If the column value isn't long-like,
+   * this could throw an exception. If the passed expected value is null, then the
+   * check is for non-existence of the row/column.
+   *
+   * @param regionName region name
+   * @param row row to check
+   * @param family column family
+   * @param qualifier column qualifier
+   * @param amount long amount to increment
+   * @param writeToWAL whether to write the increment to the WAL
+   * @return new incremented column value
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "incrementColumnValue", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public long incrementColumnValue(
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="row") byte[] row,
+      @ThriftField(name="family") byte[] family,
+      @ThriftField(name="qualifier") byte[] qualifier,
+      @ThriftField(name="amount") long amount,
+      @ThriftField(name="writeToWAL") boolean writeToWAL)
+      throws ThriftHBaseException;
+
+
+  //
+  // remote scanner interface
+  //
+
+  /**
+   * Opens a remote scanner with a RowFilter.
+   *
+   * @param regionName name of region to scan
+   * @param scan configured scan object
+   * @return scannerId scanner identifier used in other calls
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "openScanner", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public long openScanner(@ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="scan") final Scan scan)
+      throws ThriftHBaseException;
+
+  @ThriftMethod(value = "mutateRow", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void mutateRow(@ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="arm") TRowMutations arm)
+      throws ThriftHBaseException;
+
+  @ThriftMethod(value = "mutateRowAsync", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public ListenableFuture<Void> mutateRowAsync(
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="arm") TRowMutations arm);
+
+  @ThriftMethod(value = "mutateRows", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void mutateRows(@ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="armList") List<TRowMutations> armList)
+      throws ThriftHBaseException;
+
+  /**
+   * Get the next set of values. Do not use with Thrift.
+   * @param scannerId clientId passed to openScanner
+   * @return map of values; returns null if no results.
+   * @throws ThriftHBaseException e
+   */
+  @Deprecated
+  @ThriftMethod(value = "next", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public Result next(@ThriftField(name="scannerId") long scannerId)
+      throws ThriftHBaseException;
+
+  /**
+   * Get the next set of values
+   * @param scannerId clientId passed to openScanner
+   * @param numberOfRows the number of rows to fetch
+   * @return Array of Results (map of values); array is empty if done with this
+   * region and null if we are NOT to go to the next region (happens when a
+   * filter rules that the scan is done).
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "nextRows", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<Result> nextRows(@ThriftField(name="scannerId") long scannerId,
+      @ThriftField(name="numberOfRows") int numberOfRows)
+      throws ThriftHBaseException;
+
+  /**
+   * Close a scanner
+   *
+   * @param scannerId the scanner id returned by openScanner
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "close", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void close(@ThriftField(name="scannerId") long scannerId) throws ThriftHBaseException;
+
+  /**
+   * Opens a remote row lock.
+   *
+   * @param regionName name of region
+   * @param row row to lock
+   * @return lockId lock identifier
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "lockRow", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public long lockRow(@ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="row") final byte[] row)
+      throws ThriftHBaseException;
+
+  @ThriftMethod(value = "lockRowAsync", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public ListenableFuture<RowLock> lockRowAsync(
+      @ThriftField(name="regionName") byte[] regionName,
+     @ThriftField(name="row") byte[] row);
+
+  /**
+   * Releases a remote row lock.
+   *
+   * @param regionName region name
+   * @param lockId the lock id returned by lockRow
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "unlockRow", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void unlockRow(@ThriftField(name="regionName") final byte[] regionName,
+      @ThriftField(name="lockId") final long lockId)
+      throws ThriftHBaseException;
+
+  @ThriftMethod(value = "unlockRowAsync", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public ListenableFuture<Void> unlockRowAsync(
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="lockId") long lockId);
+
+  /**
+   * Method used when a master is taking the place of another failed one.
+   * @return All regions assigned on this region server
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "getRegionsAssignment", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<HRegionInfo> getRegionsAssignment() throws ThriftHBaseException;
+
+  /**
+   * Method used when a master is taking the place of another failed one.
+   * @return The HSI
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "getHServerInfo", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public HServerInfo getHServerInfo() throws ThriftHBaseException;
+
+  /**
+   * Method used for doing multiple actions (Deletes, Gets and Puts) in one call
+   * @param multi
+   * @return MultiResult
+   * @throws ThriftHBaseException
+   */
+  @ThriftMethod(value = "multiAction", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public TMultiResponse multiAction(@ThriftField(name="multi") MultiAction multi)
+      throws ThriftHBaseException;
+
+  /**
+   * Multi put for putting multiple regions' worth of puts at once.
+   *
+   * @param puts the request
+   * @return the reply
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "multiPut", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public MultiPutResponse multiPut(@ThriftField(name="puts") MultiPut puts)
+      throws ThriftHBaseException;
+
+  /**
+   * Bulk load an HFile into an open region
+   */
+  @ThriftMethod(value = "bulkLoadHFile", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void bulkLoadHFile(@ThriftField(name="hfilePath") String hfilePath,
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="familyName") byte[] familyName)
+      throws ThriftHBaseException;
+
+  @ThriftMethod(exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) },
+      value = "bulkLoadHFileSeqNum")
+  public void bulkLoadHFile(
+      @ThriftField(name="hfilePath") String hfilePath,
+      @ThriftField(name="regionName") byte[] regionName,
+      @ThriftField(name="familyName") byte[] familyName,
+      @ThriftField(name="assignSeqNum") boolean assignSeqNum)
+      throws ThriftHBaseException;
+
+  /**
+   * Closes the specified region.
+   * @param hri region to be closed
+   * @param reportWhenCompleted whether to report to master
+   * @throws ThriftHBaseException
+   */
+  @ThriftMethod(value = "closeRegion", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void closeRegion(@ThriftField(name="hri") final HRegionInfo hri,
+      @ThriftField(name="reportWhenCompleted") final boolean reportWhenCompleted)
+      throws ThriftHBaseException;
+
+  /**
+   * Update the assignment plan for each region server.
+   * @param plan the updated assignment plan
+   */
+  @ThriftMethod(value = "updateFavoredNodes", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public int updateFavoredNodes(@ThriftField(name="plan") AssignmentPlan plan)
+      throws ThriftHBaseException;
+
+  /**
+   * Update the configuration.
+   */
+  @ThriftMethod(value = "updateConfiguration", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public void updateConfiguration() throws ThriftHBaseException;
+
+  /**
+   * Stop this service.
+   * @param why Why we're stopping.
+   */
+  @ThriftMethod(value = "stop")
+  public void stop(@ThriftField(name="why") String why);
+
+  /** @return why we are stopping */
+  @ThriftMethod(value = "getStopReason")
+  public String getStopReason();
+
+
+  /**
+   * Set the number of threads to be used for HDFS Quorum reads
+   *
+   * @param maxThreads quorum reads will be disabled if set to <= 0
+   *
+   */
+  @ThriftMethod(value = "setNumHDFSQuorumReadThreads")
+  public void setNumHDFSQuorumReadThreads(
+      @ThriftField(name="maxThreads") int maxThreads);
+
+  /**
+   * Set the amount of time we wait before initiating a second read when
+   * using HDFS Quorum reads
+   *
+   * @param timeoutMillis time to wait (in milliseconds) before initiating a second read
+   *
+   */
+  @ThriftMethod(value = "setHDFSQuorumReadTimeoutMillis")
+  public void setHDFSQuorumReadTimeoutMillis(
+      @ThriftField(name="timeoutMillis") long timeoutMillis);
+
+  @ThriftMethod(value = "stopForRestart")
+  public void stopForRestart();
+
+  @ThriftMethod(value = "isStopped")
+  public boolean isStopped();
+
+  /**
+   * Get a configuration property from an HRegion
+   *
+   * @param paramName name of the configuration property
+   * @return String value of the property
+   * @throws ThriftHBaseException e
+   */
+  @ThriftMethod(value = "getConfProperty", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public String getConfProperty(String paramName) throws ThriftHBaseException;
+
+  @ThriftMethod(value = "getHistogram", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<Bucket> getHistogram(byte[] regionName)
+      throws ThriftHBaseException;
+
+  /**
+   * Returns the list of buckets which represent the uniform depth histogram
+   * for a given store.
+   * @param regionName
+   * @param family
+   * @return the list of buckets for the store's histogram
+   * @throws ThriftHBaseException
+   */
+  @ThriftMethod(value = "getHistogramForStore", exception = {
+      @ThriftException(type = ThriftHBaseException.class, id = 1) })
+  public List<Bucket> getHistogramForStore(byte[] regionName, byte[] family)
+      throws ThriftHBaseException;
+}

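For orientation only (not part of the commit): a minimal sketch of how a client might drive the remote scanner methods declared above -- openScanner, nextRows and close -- assuming a ThriftHRegionInterface handle has already been obtained (for example through the swift client machinery used elsewhere in this change, which is an assumption here). The class and helper names below are illustrative, not from this change.

import java.util.List;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.ipc.ThriftHRegionInterface;
import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;

public class ScannerLifecycleSketch {
  // Pages through every row of one region and releases the scanner when done.
  static void scanRegion(ThriftHRegionInterface region, byte[] regionName)
      throws ThriftHBaseException {
    long scannerId = region.openScanner(regionName, new Scan());
    try {
      List<Result> batch;
      do {
        // nextRows returns an empty list once the region is exhausted
        // (and may return null if a filter ended the scan early).
        batch = region.nextRows(scannerId, 100);
        if (batch == null) {
          break;
        }
        for (Result result : batch) {
          // process result ...
        }
      } while (!batch.isEmpty());
    } finally {
      // Always release the server-side scanner.
      region.close(scannerId);
    }
  }
}
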
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/thrift/HBaseToThriftAdapter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/thrift/HBaseToThriftAdapter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/thrift/HBaseToThriftAdapter.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/thrift/HBaseToThriftAdapter.java Wed Mar 12 21:17:13 2014
@@ -19,11 +19,14 @@
  */
 package org.apache.hadoop.hbase.ipc.thrift;
 
-import com.facebook.nifty.header.transport.THeaderTransport;
-import com.facebook.swift.service.RuntimeTApplicationException;
-import com.facebook.swift.service.ThriftClientManager;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -63,13 +66,11 @@ import org.apache.thrift.TApplicationExc
 import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.transport.TTransport;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
+import com.facebook.nifty.header.transport.THeaderTransport;
+import com.facebook.swift.service.RuntimeTApplicationException;
+import com.facebook.swift.service.ThriftClientManager;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
 
 /**
  * The client should use this class to communicate to the server via thrift
@@ -148,7 +149,8 @@ public class HBaseToThriftAdapter implem
         Call call = new Call(options);
         String stringData = Bytes
             .writeThriftBytesAndGetString(call, Call.class);
-        headerTransport.setHeader(HConstants.THRIFT_HEADER_FROM_CLIENT, stringData);
+        headerTransport.setHeader(HConstants.THRIFT_HEADER_FROM_CLIENT,
+            stringData);
       }
     } else {
       LOG.error("output transport for client was not THeaderTransport, client cannot send headers");
@@ -190,6 +192,7 @@ public class HBaseToThriftAdapter implem
         .getTransport();
     if (inputTransport instanceof THeaderTransport) {
       THeaderTransport headerTransport = (THeaderTransport) outputTransport;
+      headerTransport.clearHeaders();
       String dataString = headerTransport.getReadHeaders().get(HConstants.THRIFT_HEADER_FROM_SERVER);
       if (dataString != null) {
         byte[] dataBytes = Bytes.hexToBytes(dataString);
@@ -1132,8 +1135,10 @@ public class HBaseToThriftAdapter implem
     preProcess();
     try {
        List<Bucket> buckets = connection.getHistogram(regionName);
-       if (buckets.size() == 0) return null;
-       return buckets;
+      if (buckets.isEmpty()) {
+        return null;
+      }
+      return buckets;
     } catch (ThriftHBaseException te) {
       Exception e = te.getServerJavaException();
       handleIOException(e);
@@ -1154,7 +1159,9 @@ public class HBaseToThriftAdapter implem
     try {
       List<Bucket> buckets =
           connection.getHistogramForStore(regionName, family);
-      if (buckets.size() == 0) return null;
+      if (buckets.isEmpty()) {
+        return null;
+      }
       return buckets;
     } catch (ThriftHBaseException te) {
       Exception e = te.getServerJavaException();

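As the last two hunks above show, HBaseToThriftAdapter now returns null (rather than an empty list) from getHistogram and getHistogramForStore when no buckets come back. A hypothetical caller-side sketch of that contract follows; the small local interface stands in for whatever handle wraps the adapter, so the names here are illustrative only.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.io.hfile.histogram.HFileHistogram.Bucket;

public class HistogramCallerSketch {
  interface HistogramSource {
    List<Bucket> getHistogram(byte[] regionName) throws IOException;
  }

  // Normalizes the adapter's "null means no data" convention back to an empty list.
  static List<Bucket> fetchHistogram(HistogramSource server, byte[] regionName)
      throws IOException {
    List<Bucket> buckets = server.getHistogram(regionName);
    return buckets == null ? Collections.<Bucket>emptyList() : buckets;
  }
}
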
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java Wed Mar 12 21:17:13 2014
@@ -107,6 +107,10 @@ public class TableMapReduceUtil {
     }
   }
 
+  public static void initCredentials(JobConf job) throws IOException {
+    // no-op, method added as Hive code calls this method
+  }
+  
   /**
    * Ensures that the given number of reduce tasks for the given job
    * configuration does not exceed the number of regions for the given table.

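The initCredentials(JobConf) added above is a deliberate no-op, present only so that Hive code which calls it still compiles and runs against this branch. A hypothetical call site, assuming nothing beyond what the hunk adds, would simply be:

import java.io.IOException;

import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class InitCredentialsSketch {
  static void configureJob(JobConf job) throws IOException {
    // Harmless on 0.89-fb: the method exists but intentionally does nothing.
    TableMapReduceUtil.initCredentials(job);
  }
}
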
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java Wed Mar 12 21:17:13 2014
@@ -92,7 +92,7 @@ public class Export {
     long endTime = args.length > 4? Long.parseLong(args[4]): Long.MAX_VALUE;
     s.setTimeRange(startTime, endTime);
     s.setCacheBlocks(false);
-
+    
     if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
       s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
     }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Wed Mar 12 21:17:13 2014
@@ -662,7 +662,7 @@ public class HFileOutputFormat extends F
    * @return Fake TaskAttemptContext to get a KeyValue writer
    */
   public static TaskAttemptContext createFakeTaskAttemptContext(
-      Configuration conf, Path outputDir, int identifier) {
+    Configuration conf, Path outputDir, int identifier) {
     JobID jobId = new JobID();
     TaskID taskId = new TaskID(jobId, false, identifier);
     conf.set("mapred.output.dir", outputDir.toString());

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogSplitter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogSplitter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogSplitter.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogSplitter.java Wed Mar 12 21:17:13 2014
@@ -42,22 +42,22 @@ import org.apache.hadoop.util.GenericOpt
 import org.mortbay.log.Log;
 
 /**
- * MR based tool to split HBase logs in a distributed fashion.
-
- * The tool first creates a list of directories and puts them into
- * a file in the HDFS root directory. This file is used as the input
- * to the mappers, there are no reducers. Each mapper processes the
+ * MR based tool to split HBase logs in a distributed fashion. 
+ 
+ * The tool first creates a list of directories and puts them into 
+ * a file in the HDFS root directory. This file is used as the input 
+ * to the mappers, there are no reducers. Each mapper processes the 
  * directory it receives as input.
  */
 @Deprecated
 public class HLogSplitter {
   final static String NAME = "splitlogs";
-
-  static class HLogSpliterMap
+  
+  static class HLogSpliterMap 
   extends Mapper<LongWritable, Text, Text, Text> {
 
     /**
-     * Takes an entire HLog directory of a region server as the input
+     * Takes an entire HLog directory of a region server as the input 
      * and splits the logs.
      *
      * @param key     The mapper input key
@@ -75,7 +75,7 @@ public class HLogSplitter {
         Path logOutputPath = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
         Log.info("HLogs input dir      : " + logInputPath.toString());
         Log.info("Split log output dir : " + logOutputPath.toString());
-
+        
         FileSystem fs = FileSystem.get(conf);
         if (!fs.exists(logInputPath)) {
           throw new FileNotFoundException(logInputPath.toString());
@@ -127,15 +127,15 @@ public class HLogSplitter {
       usage("Directory does not exist: " + logsDirPath);
       System.exit(-1);
     }
-
-    // list all the directories in the .logs directory. Typically this there is
+    
+    // list all the directories in the .logs directory. Typically this there is 
     // one directory per regionserver.
     FileStatus[] logFolders = fs.listStatus(logsDirPath);
     if (logFolders == null || logFolders.length == 0) {
       usage("No log files to split in " + logsDirPath);
       System.exit(-1);
     }
-
+    
     // write the list of RS directories to a temp file in HDFS. This will be the
     // input to the mapper.
     String jobInputFile = "/" + NAME + "_" + System.currentTimeMillis();
@@ -149,7 +149,7 @@ public class HLogSplitter {
     }
     out.close();
     dos.close();
-
+    
     // create the job that will do the distributed log splitting
     Job job = new Job(conf, NAME + "_" + logsDirPath);
     job.setJobName(NAME + "_" + logsDirPath);
@@ -159,11 +159,11 @@ public class HLogSplitter {
     job.setInputFormatClass(TextInputFormat.class);
     FileInputFormat.setInputPaths(job, jobInputFile);
     FileOutputFormat.setOutputPath(job, jobOutputPath);
-
+    
     // submit the job
     boolean status = job.waitForCompletion(true);
 
-    // delete jobInputFile and the output directory once we are done with the
+    // delete jobInputFile and the output directory once we are done with the 
     // job
 //    fs.delete(jobInputPath);
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java Wed Mar 12 21:17:13 2014
@@ -73,7 +73,7 @@ public class ImportTsv {
     private final byte separatorByte;
 
     private int rowKeyColumnIndex;
-
+    
     public static String ROWKEY_COLUMN_SPEC="HBASE_ROW_KEY";
 
     /**
@@ -90,7 +90,7 @@ public class ImportTsv {
       // Configure columns
       ArrayList<String> columnStrings = Lists.newArrayList(
         Splitter.on(',').trimResults().split(columnsSpecification));
-
+      
       families = new byte[columnStrings.size()][];
       qualifiers = new byte[columnStrings.size()][];
 
@@ -110,7 +110,7 @@ public class ImportTsv {
         }
       }
     }
-
+    
     public int getRowKeyColumnIndex() {
       return rowKeyColumnIndex;
     }
@@ -120,7 +120,7 @@ public class ImportTsv {
     public byte[] getQualifier(int idx) {
       return qualifiers[idx];
     }
-
+    
     public ParsedLine parse(byte[] lineBytes, int length)
     throws BadTsvLineException {
       // Enumerate separator offsets
@@ -137,16 +137,16 @@ public class ImportTsv {
 
       return new ParsedLine(tabOffsets, lineBytes);
     }
-
+    
     class ParsedLine {
       private final ArrayList<Integer> tabOffsets;
       private byte[] lineBytes;
-
+      
       ParsedLine(ArrayList<Integer> tabOffsets, byte[] lineBytes) {
         this.tabOffsets = tabOffsets;
         this.lineBytes = lineBytes;
       }
-
+      
       public int getRowKeyOffset() {
         return getColumnOffset(rowKeyColumnIndex);
       }
@@ -158,7 +158,7 @@ public class ImportTsv {
           return tabOffsets.get(idx - 1) + 1;
         else
           return 0;
-      }
+      }      
       public int getColumnLength(int idx) {
         return tabOffsets.get(idx) - getColumnOffset(idx);
       }
@@ -169,7 +169,7 @@ public class ImportTsv {
         return lineBytes;
       }
     }
-
+    
     public static class BadTsvLineException extends Exception {
       public BadTsvLineException(String err) {
         super(err);
@@ -177,14 +177,14 @@ public class ImportTsv {
       private static final long serialVersionUID = 1L;
     }
   }
-
+  
   /**
    * Write table content out to files in hdfs.
    */
   static class TsvImporter
   extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put>
   {
-
+    
     /** Timestamp for all inserted rows */
     private long ts;
 
@@ -288,9 +288,9 @@ public class ImportTsv {
       TableMapReduceUtil.initTableReducerJob(tableName, null, job);
       job.setNumReduceTasks(0);
     }
-
+    
     TableMapReduceUtil.addDependencyJars(job);
-    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
+    TableMapReduceUtil.addDependencyJars(job.getConfiguration(), 
         com.google.common.base.Function.class /* Guava used by TsvParser */);
     return job;
   }
@@ -302,7 +302,7 @@ public class ImportTsv {
     if (errorMsg != null && errorMsg.length() > 0) {
       System.err.println("ERROR: " + errorMsg);
     }
-    String usage =
+    String usage = 
       "Usage: " + NAME + " -Dimporttsv.columns=a,b,c <tablename> <inputdir>\n" +
       "\n" +
       "Imports the given input directory of TSV data into the specified table.\n" +

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java Wed Mar 12 21:17:13 2014
@@ -39,7 +39,7 @@ import org.apache.hadoop.util.StringUtil
  */
 public class PutSortReducer extends
     Reducer<ImmutableBytesWritable, Put, ImmutableBytesWritable, KeyValue> {
-
+  
   @Override
   protected void reduce(
       ImmutableBytesWritable row,

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java Wed Mar 12 21:17:13 2014
@@ -101,7 +101,7 @@ public class RowCounter {
         }
         startKey = startEnd[0];
         endKey = startEnd[1];
-      }
+      } 
       else {
         // if no switch, assume column names
         sb.append(args[i]);

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowMutationSortReducer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowMutationSortReducer.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowMutationSortReducer.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowMutationSortReducer.java Wed Mar 12 21:17:13 2014
@@ -43,7 +43,7 @@ import org.apache.hadoop.util.StringUtil
  */
 public class RowMutationSortReducer extends
     Reducer<ImmutableBytesWritable, RowMutation, ImmutableBytesWritable, KeyValue> {
-
+  
   @Override
   protected void reduce(
       ImmutableBytesWritable row,

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java Wed Mar 12 21:17:13 2014
@@ -51,10 +51,10 @@ implements Configurable {
   public static final String START = "hbase.simpletotalorder.start";
   @Deprecated
   public static final String END = "hbase.simpletotalorder.end";
-
+  
   static final String START_BASE64 = "hbase.simpletotalorder.start.base64";
   static final String END_BASE64 = "hbase.simpletotalorder.end.base64";
-
+  
   private Configuration c;
   private byte [] startkey;
   private byte [] endkey;
@@ -64,21 +64,21 @@ implements Configurable {
   public static void setStartKey(Configuration conf, byte[] startKey) {
     conf.set(START_BASE64, Base64.encodeBytes(startKey));
   }
-
+  
   public static void setEndKey(Configuration conf, byte[] endKey) {
     conf.set(END_BASE64, Base64.encodeBytes(endKey));
   }
-
+  
   @SuppressWarnings("deprecation")
   static byte[] getStartKey(Configuration conf) {
     return getKeyFromConf(conf, START_BASE64, START);
   }
-
+  
   @SuppressWarnings("deprecation")
   static byte[] getEndKey(Configuration conf) {
     return getKeyFromConf(conf, END_BASE64, END);
   }
-
+  
   private static byte[] getKeyFromConf(Configuration conf,
       String base64Key, String deprecatedKey) {
     String encoded = conf.get(base64Key);
@@ -93,7 +93,7 @@ implements Configurable {
         " - please use static accessor methods instead.");
     return Bytes.toBytes(oldStyleVal);
   }
-
+  
   @Override
   public int getPartition(final ImmutableBytesWritable key, final VALUE value,
       final int reduces) {

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java Wed Mar 12 21:17:13 2014
@@ -190,7 +190,7 @@ extends InputFormat<ImmutableBytesWritab
       splitKeys.setSecond(stopKeys);
     }
 
-    List<InputSplit> splits =
+    List<InputSplit> splits = 
         new ArrayList<InputSplit>(numRegions * numMappers);
     byte[] startRow = scan.getStartRow();
     byte[] stopRow = scan.getStopRow();
@@ -415,10 +415,10 @@ extends InputFormat<ImmutableBytesWritab
   protected void setTableRecordReader(TableRecordReader tableRecordReader) {
     this.tableRecordReader = tableRecordReader;
   }
-
+  
   /**
    * Sets the number of mappers assigned to each region.
-   *
+   * 
    * @param num
    * @throws IllegalArgumentException When <code>num</code> <= 0.
    */

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java Wed Mar 12 21:17:13 2014
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.Base
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.ZooKeeper;
@@ -62,7 +63,7 @@ public class TableMapReduceUtil {
   // calls than required
   static ConcurrentSkipListMap<String, Entry<Job, Integer>> numRegionsCached =
       new ConcurrentSkipListMap<String, Entry<Job, Integer>>();
-
+  
   /**
    * Use this before submitting a TableMap job. It will appropriately set up
    * the job.
@@ -245,6 +246,10 @@ public class TableMapReduceUtil {
     }
   }
 
+  public static void initCredentials(JobConf job) throws IOException {
+    // no-op, method added as Hive code calls this method
+  }
+  
   /**
    * Ensures that the given number of reduce tasks for the given job
    * configuration does not exceed the number of regions for the given table.
@@ -338,9 +343,9 @@ public class TableMapReduceUtil {
           job.getCombinerClass());
     } catch (ClassNotFoundException e) {
       throw new IOException(e);
-    }
+    }    
   }
-
+  
   /**
    * Add the jars containing the given classes to the job's configuration
    * such that JobClient will ship them to the cluster and add them to
@@ -348,13 +353,13 @@ public class TableMapReduceUtil {
    */
   public static void addDependencyJars(Configuration conf,
       Class... classes) throws IOException {
-
+    
     FileSystem localFs = FileSystem.getLocal(conf);
 
     Set<String> jars = new HashSet<String>();
     for (Class clazz : classes) {
       if (clazz == null) continue;
-
+      
       String pathStr = findContainingJar(clazz);
       if (pathStr == null) {
         LOG.warn("Could not find jar for class " + clazz +
@@ -370,7 +375,7 @@ public class TableMapReduceUtil {
       jars.add(path.makeQualified(localFs).toString());
     }
     if (jars.isEmpty()) return;
-
+    
     String tmpJars = conf.get("tmpjars");
     if (tmpJars == null) {
       tmpJars = StringUtils.arrayToString(jars.toArray(new String[0]));
@@ -379,14 +384,14 @@ public class TableMapReduceUtil {
     }
     conf.set("tmpjars", tmpJars);
   }
-
-  /**
+  
+  /** 
    * Find a jar that contains a class of the same name, if any.
    * It will return a jar file, even if that is not the first thing
    * on the class path that has a class with the same name.
-   *
+   * 
    * This is shamelessly copied from JobConf
-   *
+   * 
    * @param my_class the class to find.
    * @return a jar file that contains the class, or null.
    * @throws IOException

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java Wed Mar 12 21:17:13 2014
@@ -79,7 +79,7 @@ public class TableRecordReaderImpl {
    */
   public void init(Configuration conf) throws IOException {
     LOG.info("Scanner init ; " +
-             " start row = " + Bytes.toStringBinary(scan.getStartRow()) +
+             " start row = " + Bytes.toStringBinary(scan.getStartRow()) + 
              " stop row = " + Bytes.toStringBinary(scan.getStopRow()));
     this.conf = conf;
     restart(scan.getStartRow());

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java Wed Mar 12 21:17:13 2014
@@ -78,7 +78,7 @@ public class InputSampler<K,V> extends C
   }
 
   /**
-   * Interface to sample using an
+   * Interface to sample using an 
    * {@link org.apache.hadoop.mapreduce.InputFormat}.
    */
   public interface Sampler<K,V> {
@@ -86,7 +86,7 @@ public class InputSampler<K,V> extends C
      * For a given job, collect and return a subset of the keys from the
      * input data.
      */
-    K[] getSample(InputFormat<K,V> inf, Job job)
+    K[] getSample(InputFormat<K,V> inf, Job job) 
     throws IOException, InterruptedException;
   }
 
@@ -124,7 +124,7 @@ public class InputSampler<K,V> extends C
      * From each split sampled, take the first numSamples / numSplits records.
      */
     @SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
-    public K[] getSample(InputFormat<K,V> inf, Job job)
+    public K[] getSample(InputFormat<K,V> inf, Job job) 
         throws IOException, InterruptedException {
       List<InputSplit> splits = inf.getSplits(job);
       ArrayList<K> samples = new ArrayList<K>(numSamples);
@@ -134,8 +134,8 @@ public class InputSampler<K,V> extends C
       long records = 0;
       for (int i = 0; i < splitsToSample; ++i) {
         RecordReader<K,V> reader = inf.createRecordReader(
-          splits.get(i * splitStep),
-          new TaskAttemptContext(job.getConfiguration(),
+          splits.get(i * splitStep), 
+          new TaskAttemptContext(job.getConfiguration(), 
                                  new TaskAttemptID()));
         while (reader.nextKeyValue()) {
           samples.add(reader.getCurrentKey());
@@ -191,7 +191,7 @@ public class InputSampler<K,V> extends C
      * the quota of keys from that split is satisfied.
      */
     @SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
-    public K[] getSample(InputFormat<K,V> inf, Job job)
+    public K[] getSample(InputFormat<K,V> inf, Job job) 
         throws IOException, InterruptedException {
       List<InputSplit> splits = inf.getSplits(job);
       ArrayList<K> samples = new ArrayList<K>(numSamples);
@@ -213,8 +213,8 @@ public class InputSampler<K,V> extends C
       // the target sample keyset
       for (int i = 0; i < splitsToSample ||
                      (i < splits.size() && samples.size() < numSamples); ++i) {
-        RecordReader<K,V> reader = inf.createRecordReader(splits.get(i),
-          new TaskAttemptContext(job.getConfiguration(),
+        RecordReader<K,V> reader = inf.createRecordReader(splits.get(i), 
+          new TaskAttemptContext(job.getConfiguration(), 
                                  new TaskAttemptID()));
         while (reader.nextKeyValue()) {
           if (r.nextDouble() <= freq) {
@@ -272,7 +272,7 @@ public class InputSampler<K,V> extends C
      * frequency.
      */
     @SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
-    public K[] getSample(InputFormat<K,V> inf, Job job)
+    public K[] getSample(InputFormat<K,V> inf, Job job) 
         throws IOException, InterruptedException {
       List<InputSplit> splits = inf.getSplits(job);
       ArrayList<K> samples = new ArrayList<K>();
@@ -283,7 +283,7 @@ public class InputSampler<K,V> extends C
       for (int i = 0; i < splitsToSample; ++i) {
         RecordReader<K,V> reader = inf.createRecordReader(
           splits.get(i * splitStep),
-          new TaskAttemptContext(job.getConfiguration(),
+          new TaskAttemptContext(job.getConfiguration(), 
                                  new TaskAttemptID()));
         while (reader.nextKeyValue()) {
           ++records;
@@ -305,10 +305,10 @@ public class InputSampler<K,V> extends C
    * returned from {@link TotalOrderPartitioner#getPartitionFile}.
    */
   @SuppressWarnings("unchecked") // getInputFormat, getOutputKeyComparator
-  public static <K,V> void writePartitionFile(Job job, Sampler<K,V> sampler)
+  public static <K,V> void writePartitionFile(Job job, Sampler<K,V> sampler) 
       throws IOException, ClassNotFoundException, InterruptedException {
     Configuration conf = job.getConfiguration();
-    final InputFormat inf =
+    final InputFormat inf = 
         ReflectionUtils.newInstance(job.getInputFormatClass(), conf);
     int numPartitions = job.getNumReduceTasks();
     K[] samples = sampler.getSample(inf, job);
@@ -321,7 +321,7 @@ public class InputSampler<K,V> extends C
     if (fs.exists(dst)) {
       fs.delete(dst, false);
     }
-    SequenceFile.Writer writer = SequenceFile.createWriter(fs,
+    SequenceFile.Writer writer = SequenceFile.createWriter(fs, 
       conf, dst, job.getMapOutputKeyClass(), NullWritable.class);
     NullWritable nullValue = NullWritable.get();
     float stepSize = samples.length / (float) numPartitions;
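
For context on the hunks above: InputSampler feeds the split keys that TotalOrderPartitioner later reads back via getPartitionFile. A minimal sketch of the usual wiring, assuming the backport keeps Hadoop's RandomSampler(freq, numSamples, maxSplitsSampled) constructor and a nested Sampler interface, and that the job already has its input format and a Text map output key configured, might look like this (illustrative only, not code from this revision):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.hbase.mapreduce.hadoopbackport.InputSampler;
import org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner;

public class SamplerWiringSketch {
  public static void configure(Job job) throws Exception {
    Configuration conf = job.getConfiguration();
    // Point the partitioner at the file that writePartitionFile() will produce.
    conf.set(TotalOrderPartitioner.PARTITIONER_PATH,
        TotalOrderPartitioner.DEFAULT_PATH);
    job.setPartitionerClass(TotalOrderPartitioner.class);
    // Sample roughly 10% of records, capped at 10000 keys from at most 10 splits.
    InputSampler.Sampler<Text, Text> sampler =
        new InputSampler.RandomSampler<Text, Text>(0.1, 10000, 10);
    // Derives sorted split keys from the sample and writes them to the partition file.
    InputSampler.writePartitionFile(job, sampler);
  }
}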

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/TotalOrderPartitioner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/TotalOrderPartitioner.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/TotalOrderPartitioner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/TotalOrderPartitioner.java Wed Mar 12 21:17:13 2014
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.Reflection
 /**
  * Partitioner effecting a total order by reading split points from
  * an externally generated source.
- *
+ * 
  * This is an identical copy of o.a.h.mapreduce.lib.partition.TotalOrderPartitioner
  * from Hadoop trunk at r910774.
  */
@@ -47,11 +47,11 @@ public class TotalOrderPartitioner<K ext
 
   private Node partitions;
   public static final String DEFAULT_PATH = "_partition.lst";
-  public static final String PARTITIONER_PATH =
+  public static final String PARTITIONER_PATH = 
     "mapreduce.totalorderpartitioner.path";
-  public static final String MAX_TRIE_DEPTH =
-    "mapreduce.totalorderpartitioner.trie.maxdepth";
-  public static final String NATURAL_ORDER =
+  public static final String MAX_TRIE_DEPTH = 
+    "mapreduce.totalorderpartitioner.trie.maxdepth"; 
+  public static final String NATURAL_ORDER = 
     "mapreduce.totalorderpartitioner.naturalorder";
   Configuration conf;
 
@@ -96,11 +96,11 @@ public class TotalOrderPartitioner<K ext
       if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
         partitions = buildTrie((BinaryComparable[])splitPoints, 0,
             splitPoints.length, new byte[0],
-            // Now that blocks of identical splitless trie nodes are
+            // Now that blocks of identical splitless trie nodes are 
             // represented reentrantly, and we develop a leaf for any trie
             // node with only one split point, the only reason for a depth
             // limit is to refute stack overflow or bloat in the pathological
-            // case where the split points are long and mostly look like bytes
+            // case where the split points are long and mostly look like bytes 
             // iii...iixii...iii   .  Therefore, we make the default depth
             // limit large but not huge.
             conf.getInt(MAX_TRIE_DEPTH, 200));
@@ -115,7 +115,7 @@ public class TotalOrderPartitioner<K ext
   public Configuration getConf() {
     return conf;
   }
-
+  
   // by construction, we know if our keytype
   @SuppressWarnings("unchecked") // is memcmp-able and uses the trie
   public int getPartition(K key, V value, int numPartitions) {
@@ -201,7 +201,7 @@ public class TotalOrderPartitioner<K ext
       return child[0xFF & key.getBytes()[level]].findPartition(key);
     }
   }
-
+  
   /**
    * @param level        the tree depth at this node
    * @param splitPoints  the full split point vector, which holds
@@ -209,7 +209,7 @@ public class TotalOrderPartitioner<K ext
    *                     should contain
    * @param lower        first INcluded element of splitPoints
    * @param upper        first EXcluded element of splitPoints
-   * @return  a leaf node.  They come in three kinds: no split points
+   * @return  a leaf node.  They come in three kinds: no split points 
    *          [and the findPartition returns a canned index], one split
    *          point [and we compare with a single comparand], or more
    *          than one [and we do a binary search].  The last case is
@@ -220,10 +220,10 @@ public class TotalOrderPartitioner<K ext
       switch (upper - lower) {
       case 0:
           return new UnsplitTrieNode(level, lower);
-
+          
       case 1:
           return new SinglySplitTrieNode(level, splitPoints, lower);
-
+          
       default:
           return new LeafTrieNode(level, splitPoints, lower, upper);
       }
@@ -231,8 +231,8 @@ public class TotalOrderPartitioner<K ext
 
   /**
    * A leaf trie node that scans for the key between lower..upper.
-   *
-   * We don't generate many of these now, since we usually continue trie-ing
+   * 
+   * We don't generate many of these now, since we usually continue trie-ing 
   * when more than one split point remains at this level, and we make different
    * objects for nodes with 0 or 1 split point.
    */
@@ -251,30 +251,30 @@ public class TotalOrderPartitioner<K ext
       return (pos < 0) ? -pos : pos;
     }
   }
-
+  
   private class UnsplitTrieNode extends TrieNode {
       final int result;
-
+      
       UnsplitTrieNode(int level, int value) {
           super(level);
           this.result = value;
       }
-
+      
       public int findPartition(BinaryComparable key) {
           return result;
       }
   }
-
+  
   private class SinglySplitTrieNode extends TrieNode {
       final int               lower;
       final BinaryComparable  mySplitPoint;
-
+      
       SinglySplitTrieNode(int level, BinaryComparable[] splitPoints, int lower) {
           super(level);
           this.lower = lower;
           this.mySplitPoint = splitPoints[lower];
       }
-
+      
       public int findPartition(BinaryComparable key) {
           return lower + (key.compareTo(mySplitPoint) < 0 ? 0 : 1);
       }
@@ -304,25 +304,25 @@ public class TotalOrderPartitioner<K ext
     reader.close();
     return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
   }
-
+  
   /**
-   *
+   * 
    * This object contains a TrieNodeRef if there is such a thing that
-   * can be repeated.  Two adjacent trie node slots that contain no
+   * can be repeated.  Two adjacent trie node slots that contain no 
    * split points can be filled with the same trie node, even if they
   * are not on the same level.  See buildTrieRec, below.
    *
-   */
+   */  
   private class CarriedTrieNodeRef
   {
       TrieNode   content;
-
+      
       CarriedTrieNodeRef() {
           content = null;
       }
   }
 
-
+  
   /**
    * Given a sorted set of cut points, build a trie that will find the correct
    * partition quickly.
@@ -338,31 +338,31 @@ public class TotalOrderPartitioner<K ext
       return buildTrieRec
                (splits, lower, upper, prefix, maxDepth, new CarriedTrieNodeRef());
   }
-
+  
   /**
    * This is the core of buildTrie.  The interface, and stub, above, just adds
-   * an empty CarriedTrieNodeRef.
-   *
+   * an empty CarriedTrieNodeRef.  
+   * 
    * We build trie nodes in depth first order, which is also in key space
    * order.  Every leaf node is referenced as a slot in a parent internal
    * node.  If two adjacent slots [in the DFO] hold leaf nodes that have
-   * no split point, then they are not separated by a split point either,
+   * no split point, then they are not separated by a split point either, 
    * because there's no place in key space for that split point to exist.
-   *
+   * 
    * When that happens, the leaf nodes would be semantically identical, and
-   * we reuse the object.  A single CarriedTrieNodeRef "ref" lives for the
+   * we reuse the object.  A single CarriedTrieNodeRef "ref" lives for the 
    * duration of the tree-walk.  ref carries a potentially reusable, unsplit
-   * leaf node for such reuse until a leaf node with a split arises, which
+   * leaf node for such reuse until a leaf node with a split arises, which 
    * breaks the chain until we need to make a new unsplit leaf node.
-   *
-   * Note that this use of CarriedTrieNodeRef means that for internal nodes,
-   * for internal nodes if this code is modified in any way we still need
+   * 
+   * Note that this use of CarriedTrieNodeRef means that, for internal nodes, 
+   * if this code is modified in any way we still need 
    * to make or fill in the subnodes in key space order.
    */
   private TrieNode buildTrieRec(BinaryComparable[] splits, int lower,
       int upper, byte[] prefix, int maxDepth, CarriedTrieNodeRef ref) {
     final int depth = prefix.length;
-    // We generate leaves for a single split point as well as for
+    // We generate leaves for a single split point as well as for 
     // no split points.
     if (depth >= maxDepth || lower >= upper - 1) {
         // If we have two consecutive requests for an unsplit trie node, we
@@ -393,9 +393,9 @@ public class TotalOrderPartitioner<K ext
     }
     // pick up the rest
     trial[depth] = (byte)0xFF;
-    result.child[0xFF]
+    result.child[0xFF] 
                  = buildTrieRec(splits, lower, currentBound, trial, maxDepth, ref);
-
+    
     return result;
   }
 }
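
The comments kept in the hunks above describe how leaf trie nodes fall back to a binary search over the sorted split points, with a key equal to a split point going to the higher partition (as SinglySplitTrieNode also does). A standalone restatement of that lookup convention, using plain byte[] keys purely for illustration rather than the partitioner's own code:

import java.util.Arrays;

public class SplitPointLookupSketch {
  // Partition of a key = number of split points that compare <= the key;
  // a negative binarySearch result encodes the insertion point.
  static int findPartition(byte[][] sortedSplitPoints, byte[] key) {
    int pos = Arrays.binarySearch(sortedSplitPoints, key,
        SplitPointLookupSketch::compareBytes) + 1;
    return (pos < 0) ? -pos : pos;
  }

  // Unsigned lexicographic comparison, shortest array first on a prefix match.
  static int compareBytes(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xFF) - (b[i] & 0xFF);
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    byte[][] splits = { {0x10}, {0x40}, {(byte) 0x90} }; // 3 splits -> 4 partitions
    System.out.println(findPartition(splits, new byte[] {0x05}));        // 0
    System.out.println(findPartition(splits, new byte[] {0x40}));        // 2 (equal key goes up)
    System.out.println(findPartition(splits, new byte[] {(byte) 0xF0})); // 3
  }
}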

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/loadtest/CompositeOperationGenerator.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/loadtest/CompositeOperationGenerator.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/loadtest/CompositeOperationGenerator.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/loadtest/CompositeOperationGenerator.java Wed Mar 12 21:17:13 2014
@@ -73,9 +73,10 @@ public class CompositeOperationGenerator
    * @throws ExhaustedException if the last of this instance's child generators
    *         has itself become exhausted
    */
+  @Override
   public Operation nextOperation(DataGenerator dataGenerator)
       throws ExhaustedException {
-    if (generators.size() == 0) {
+    if (generators.isEmpty()) {
       throw new ExhaustedException();
     }
 
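
The hunk above tightens the emptiness check guarding nextOperation. As the javadoc says, the composite only signals exhaustion once its last child generator is itself exhausted; a rough sketch of that delegation pattern follows, where every name other than ExhaustedException is a placeholder and the rotation details are assumptions, not the class's actual code:

import java.util.ArrayDeque;
import java.util.Deque;

class ExhaustedException extends Exception {
}

interface ChildGenerator {
  String nextOperation() throws ExhaustedException; // stands in for nextOperation(DataGenerator)
}

class CompositeGeneratorSketch {
  private final Deque<ChildGenerator> generators = new ArrayDeque<ChildGenerator>();

  void addGenerator(ChildGenerator generator) {
    generators.addLast(generator);
  }

  String nextOperation() throws ExhaustedException {
    while (!generators.isEmpty()) {
      ChildGenerator current = generators.peekFirst();
      try {
        return current.nextOperation();
      } catch (ExhaustedException e) {
        generators.removeFirst(); // this child is done; fall through to the next one
      }
    }
    // Mirrors the isEmpty() guard in the hunk: nothing left to delegate to.
    throw new ExhaustedException();
  }
}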

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentDomain.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentDomain.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentDomain.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentDomain.java Wed Mar 12 21:17:13 2014
@@ -41,7 +41,7 @@ public class AssignmentDomain {
   private RackManager rackManager;
   private Map<HServerAddress, String> regionServerToRackMap;
   private Random random;
-
+  
   public AssignmentDomain(Configuration conf) {
     rackToRegionServerMap = new HashMap<String, List<HServerAddress>>();
     regionServerToRackMap = new HashMap<HServerAddress, String>();
@@ -49,7 +49,7 @@ public class AssignmentDomain {
     rackManager = new RackManager(conf);
     random = new Random();
   }
-
+  
   /**
    * Set the random seed
    * @param seed
@@ -73,7 +73,7 @@ public class AssignmentDomain {
    * Get a random rack except for the current rack
    * @param skipRackSet
    * @return the random rack except for any Rack from the skipRackSet
-   * @throws IOException
+   * @throws IOException 
    */
   public String getOneRandomRack(Set<String> skipRackSet) throws IOException {
     if (skipRackSet == null || uniqueRackList.size() <= skipRackSet.size()) {
@@ -85,10 +85,10 @@ public class AssignmentDomain {
       int randomIndex = random.nextInt(this.uniqueRackList.size());
       randomRack = this.uniqueRackList.get(randomIndex);
     } while (skipRackSet.contains(randomRack));
-
+    
     return randomRack;
   }
-
+  
   /**
    * Get one random server from the rack
    * @param rack
@@ -98,40 +98,40 @@ public class AssignmentDomain {
   public HServerAddress getOneRandomServer(String rack) throws IOException {
     return this.getOneRandomServer(rack, null);
   }
-
+  
   /**
    * Get a random server from the rack except for the servers in the skipServerSet
    * @param skipServerSet
    * @return the random server except for any servers from the skipServerSet
-   * @throws IOException
+   * @throws IOException 
    */
   public HServerAddress getOneRandomServer(String rack,
       Set<HServerAddress> skipServerSet) throws IOException {
     if(rack == null) return null;
     List<HServerAddress> serverList = this.rackToRegionServerMap.get(rack);
     if (serverList == null) return null;
-
+    
     // Get a random server except for any servers from the skip set
     if (skipServerSet != null && serverList.size() <= skipServerSet.size()) {
       throw new IOException("Cannot randomly pick another random server");
     }
-
+    
     HServerAddress randomServer;
     do {
       int randomIndex = random.nextInt(serverList.size());
       randomServer = serverList.get(randomIndex);
     } while (skipServerSet != null && skipServerSet.contains(randomServer));
-
+    
     return randomServer;
   }
-
+  
   /**
    * @return the total number of unique rack in the domain.
    */
   public int getTotalRackNum() {
     return this.uniqueRackList.size();
   }
-
+  
   /**
    * Get the list of region severs in the rack
    * @param rack
@@ -140,7 +140,7 @@ public class AssignmentDomain {
   public List<HServerAddress> getServersFromRack(String rack) {
     return this.rackToRegionServerMap.get(rack);
   }
-
+  
   /**
    * Add a server to the assignment domain
    * @param server
@@ -170,7 +170,7 @@ public class AssignmentDomain {
       this.addServer(server);
     }
   }
-
+  
   public Set<HServerAddress> getAllServers() {
     return regionServerToRackMap.keySet();
   }
@@ -188,14 +188,14 @@ public class AssignmentDomain {
   public Map<String, List<HServerAddress>> getRackToRegionServerMap() {
     return this.rackToRegionServerMap;
   }
-
+  
   /**
    * @return true if there is no rack in the assignment domain
    */
   public boolean isEmpty() {
     return uniqueRackList.isEmpty();
   }
-
+  
   /**
    * @return true if can place the favored nodes
    */
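
Several of the methods touched above (getOneRandomRack, getOneRandomServer) pick a random element while honouring a skip set. A standalone sketch of that rejection-sampling idea, mirroring the size guard visible in the diff but otherwise illustrative rather than AssignmentDomain's own code:

import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.Set;

final class RandomPickSketch {
  // Pick a random candidate that is not in the skip set. The size guard below
  // rules out the case where the skip set covers every candidate, so the
  // retry loop always terminates.
  static <T> T pickExcept(List<T> candidates, Set<T> skip, Random random)
      throws IOException {
    if (candidates == null || candidates.isEmpty()
        || (skip != null && candidates.size() <= skip.size())) {
      throw new IOException("Cannot randomly pick another element");
    }
    T choice;
    do {
      choice = candidates.get(random.nextInt(candidates.size()));
    } while (skip != null && skip.contains(choice));
    return choice;
  }
}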

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Wed Mar 12 21:17:13 2014
@@ -19,18 +19,18 @@ import org.apache.hadoop.hbase.HServerAd
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.master.AssignmentPlan.POSITION;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.HasThread;
+import org.apache.hadoop.hbase.util.Threads;
 
 /**
- * Manages the preferences for assigning regions to specific servers. 
+ * Manages the preferences for assigning regions to specific servers.
  * It gets the assignment plan from scanning the META region and keeps this
  * assignment plan updated.
- * 
+ *
  * The assignment manager executes the assignment plan by adding the regions
  * with its most favored live region server into the transient assignment.
  * Each transient assignment will be only valid for a configurable time
- * before expire. During these valid time, the region will only be assigned 
+ * before it expires. During this window, the region will only be assigned
  * based on the transient assignment.
  *
  * All access to this class is thread-safe.
@@ -69,12 +69,12 @@ public class AssignmentManager {
    * favored region server list.
    */
   private AssignmentPlan assignmentPlan;
-  
+
   private final HMaster master;
   private final Configuration conf;
   private long millisecondDelay;
   private POSITION[] positions;
-  
+
   public AssignmentManager(HMaster master) {
     this.master = master;
     this.conf = master.getConfiguration();
@@ -169,12 +169,12 @@ public class AssignmentManager {
       Set<HRegionInfo> regions = transientAssignments.get(server);
       if (regions != null) {
         regions.remove(region);
-        if (regions.size() == 0) {
+        if (regions.isEmpty()) {
           transientAssignments.remove(server);
         }
         LOG.debug("Remove the transisent assignment: region " +
             region.getRegionNameAsString() + " to " +
-            server.getHostNameWithPort()); 
+            server.getHostNameWithPort());
         return true;
       }
       return false;
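
The reworked class comment above describes transient assignments that are honoured only for a configurable window before they expire. A minimal sketch of such an expiring preference map, with hypothetical names rather than AssignmentManager's actual fields:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class TransientAssignmentSketch<R, S> {
  private static final class Holder<T> {
    final T server;
    final long recordedAtMs;
    Holder(T server, long recordedAtMs) {
      this.server = server;
      this.recordedAtMs = recordedAtMs;
    }
  }

  private final Map<R, Holder<S>> assignments = new ConcurrentHashMap<R, Holder<S>>();
  private final long validityMs; // configurable window, e.g. from Configuration

  TransientAssignmentSketch(long validityMs) {
    this.validityMs = validityMs;
  }

  void addTransientAssignment(R region, S server) {
    assignments.put(region, new Holder<S>(server, System.currentTimeMillis()));
  }

  // Returns the preferred server, or null once the preference has expired.
  S getPreferred(R region) {
    Holder<S> holder = assignments.get(region);
    if (holder == null) {
      return null;
    }
    if (System.currentTimeMillis() - holder.recordedAtMs > validityMs) {
      assignments.remove(region); // expired: fall back to normal assignment
      return null;
    }
    return holder.server;
  }
}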

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentPlan.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentPlan.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentPlan.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentPlan.java Wed Mar 12 21:17:13 2014
@@ -35,46 +35,56 @@ import org.apache.hadoop.hbase.HServerAd
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Writable;
 
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+
 /**
  * AssignmentPlan is a writable object for the region assignment plan.
- * It contains the mapping information between each region and
+ * It contains the mapping information between each region and 
  * its favored region server list.
- *
+ * 
  * All the access to this class is thread-safe.
  */
+@ThriftStruct
 public class AssignmentPlan implements Writable{
   protected static final Log LOG = LogFactory.getLog(
       AssignmentPlan.class.getName());
-
+  
   private static final int VERSION = 1;
-
+  
   /** the map between each region and its favored region server list */
   private Map<HRegionInfo, List<HServerAddress>> assignmentMap;
 
  /** the map between each region and its last favored server list update
    * time stamp
   */
-  private Map<HRegionInfo, Long> assignmentUpdateTS;
-
+  private Map<HRegionInfo, Long> assignmentUpdateTS = new HashMap<>();
+  
   public static enum POSITION {
     PRIMARY,
     SECONDARY,
     TERTIARY;
   };
-
+  
   public AssignmentPlan() {
     assignmentMap = new HashMap<HRegionInfo, List<HServerAddress>>();
-    assignmentUpdateTS = new HashMap<HRegionInfo, Long>();
+  }
+
+  @ThriftConstructor
+  public AssignmentPlan(
+      @ThriftField(1) Map<HRegionInfo, List<HServerAddress>> assignmentMap) {
+    this.assignmentMap = assignmentMap;
   }
 
   /**
    * Initialize the assignment plan with the existing primary region server map
-   * and the existing secondary/tertiary region server map
-   *
+   * and the existing secondary/tertiary region server map 
+   * 
    * if any regions cannot find the proper secondary / tertiary region server
    * for whatever reason, just do NOT update the assignment plan for this region
    * @param primaryRSMap
-   * @param secondaryAndTiteraryRSMap
+   * @param secondaryAndTertiaryRSMap
    */
   public void initialize(Map<HRegionInfo, HServerAddress> primaryRSMap,
       Map<HRegionInfo, Pair<HServerAddress, HServerAddress>> secondaryAndTertiaryRSMap) {
@@ -84,7 +94,7 @@ public class AssignmentPlan implements W
       HRegionInfo regionInfo = entry.getKey();
       Pair<HServerAddress, HServerAddress> secondaryAndTertiaryPair =
         entry.getValue();
-
+      
       // Get the primary region server
       HServerAddress primaryRS = primaryRSMap.get(regionInfo);
       if (primaryRS == null) {
@@ -92,7 +102,7 @@ public class AssignmentPlan implements W
             regionInfo.getRegionNameAsString());
         continue;
       }
-
+      
       // Update the assignment plan with the favored nodes
       List<HServerAddress> serverList = new ArrayList<HServerAddress>();
       serverList.add(POSITION.PRIMARY.ordinal(), primaryRS);
@@ -118,16 +128,16 @@ public class AssignmentPlan implements W
     this.assignmentMap.put(region, servers);
     LOG.info("Update the assignment plan for region " +
         region.getRegionNameAsString() + " to favored nodes " +
-        RegionPlacement.getFavoredNodes(servers)
+        RegionPlacement.getFavoredNodes(servers) 
         + " at time stamp " + ts);
   }
-
+  
   /**
    * Add an assignment to the plan
    * @param region
    * @param servers
    */
-  public synchronized void updateAssignmentPlan(HRegionInfo region,
+  public synchronized void updateAssignmentPlan(HRegionInfo region, 
       List<HServerAddress> servers) {
     if (region == null || servers == null || servers.size() ==0)
       return;
@@ -136,7 +146,7 @@ public class AssignmentPlan implements W
         region.getRegionNameAsString() + " ; favored nodes " +
         RegionPlacement.getFavoredNodes(servers));
   }
-
+ 
   /**
    * Remove one assignment from the plan
    * @param region
@@ -145,7 +155,7 @@ public class AssignmentPlan implements W
     this.assignmentMap.remove(region);
     this.assignmentUpdateTS.remove(region);
   }
-
+  
   /**
    * @param region
    * @return true if there is an assignment plan for the particular region.
@@ -153,7 +163,7 @@ public class AssignmentPlan implements W
   public synchronized boolean hasAssignment(HRegionInfo region) {
     return assignmentMap.containsKey(region);
   }
-
+  
   /**
    * @param region
    * @return the list of favored region server for this region based on the plan
@@ -161,7 +171,7 @@ public class AssignmentPlan implements W
   public synchronized List<HServerAddress> getAssignment(HRegionInfo region) {
     return assignmentMap.get(region);
   }
-
+  
   /**
    * @param region
    * @return the last update time stamp for the region in the plan
@@ -173,14 +183,19 @@ public class AssignmentPlan implements W
     else
       return updateTS.longValue();
   }
-
+ 
   /**
    * @return the mapping between each region to its favored region server list
    */
+  @ThriftField(1)
   public synchronized Map<HRegionInfo, List<HServerAddress>> getAssignmentMap() {
     return this.assignmentMap;
   }
-  
+
+  public Map<HRegionInfo, Long> getAssignmentUpdateTSMap() {
+    return this.assignmentUpdateTS;
+  }
+
   @Override
   public void write(DataOutput out) throws IOException {
     out.writeInt(VERSION);
@@ -227,12 +242,12 @@ public class AssignmentPlan implements W
        addr.readFields(in);
        serverList.add(addr);
      }
-
+     
      // add the assignment to favoredAssignmentMap
      this.assignmentMap.put(region, serverList);
    }
  }
-
+ 
  @Override
  public boolean equals(Object o) {
    if (this == o) {
@@ -248,10 +263,11 @@ public class AssignmentPlan implements W
    Map<HRegionInfo, List<HServerAddress>> comparedMap=
      ((AssignmentPlan)o).getAssignmentMap();
 
+     
    // compare the size
    if (comparedMap.size() != this.assignmentMap.size())
      return false;
-
+   
    // compare each element in the assignment map
    for (Map.Entry<HRegionInfo, List<HServerAddress>> entry :
      comparedMap.entrySet()) {
@@ -264,7 +280,7 @@ public class AssignmentPlan implements W
    }
    return true;
  }
-
+ 
   /**
    * Returns the position of the passed server in the list of favored nodes (the
    * position can be primary, secondary or tertiary)
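
The new com.facebook.swift.codec annotations introduced above follow a fixed pattern: @ThriftStruct on the class, @ThriftConstructor on the constructor whose parameters carry @ThriftField ids, and the same ids on the matching getters. Shown here on a toy struct; RegionPreference is an invented name used only to illustrate the pattern, assuming swift-codec is on the classpath:

import com.facebook.swift.codec.ThriftConstructor;
import com.facebook.swift.codec.ThriftField;
import com.facebook.swift.codec.ThriftStruct;

import java.util.List;

@ThriftStruct
public class RegionPreference {
  private final String regionName;
  private final List<String> favoredServers;

  @ThriftConstructor
  public RegionPreference(
      @ThriftField(1) String regionName,
      @ThriftField(2) List<String> favoredServers) {
    this.regionName = regionName;
    this.favoredServers = favoredServers;
  }

  // Field ids on the getters must match the constructor parameter ids.
  @ThriftField(1)
  public String getRegionName() {
    return regionName;
  }

  @ThriftField(2)
  public List<String> getFavoredServers() {
    return favoredServers;
  }
}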

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java Wed Mar 12 21:17:13 2014
@@ -77,7 +77,7 @@ public class AssignmentVerificationRepor
   private float minDispersionScore = Float.MAX_VALUE;
   private Set<HServerAddress> minDispersionScoreServerSet =
     new HashSet<HServerAddress>();
-
+  
   private float avgDispersionNum = 0;
   private float maxDispersionNum = 0;
   private Set<HServerAddress> maxDispersionNumServerSet =
@@ -85,7 +85,7 @@ public class AssignmentVerificationRepor
   private float minDispersionNum = Float.MAX_VALUE;
   private Set<HServerAddress> minDispersionNumServerSet =
     new HashSet<HServerAddress>();
-
+  
   public void fillUp(String tableName, RegionAssignmentSnapshot snapshot,
       Map<String, Map<String, Float>> regionLocalityMap) {
     // Set the table name
@@ -144,7 +144,7 @@ public class AssignmentVerificationRepor
           favoredNodes.get(AssignmentPlan.POSITION.SECONDARY.ordinal());
         HServerAddress tertiaryRS =
           favoredNodes.get(AssignmentPlan.POSITION.TERTIARY.ordinal());
-
+        
         // Update the primary rs to its region set map
         Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS);
         if (regionCounter == null) {
@@ -152,7 +152,7 @@ public class AssignmentVerificationRepor
         }
         regionCounter = regionCounter.intValue() + 1;
         primaryRSToRegionCounterMap.put(primaryRS, regionCounter);
-
+        
         // Update the primary rs to secondary and tertiary rs map
         Set<HServerAddress> secAndTerSet = primaryToSecTerRSMap.get(primaryRS);
         if (secAndTerSet == null) {
@@ -161,7 +161,7 @@ public class AssignmentVerificationRepor
         secAndTerSet.add(secondaryRS);
         secAndTerSet.add(tertiaryRS);
         primaryToSecTerRSMap.put(primaryRS, secAndTerSet);
-
+        
         // Get the position of the current region server in the favored nodes list
         AssignmentPlan.POSITION favoredNodePosition =
           AssignmentPlan.getFavoredServerPosition(favoredNodes, currentRS);
@@ -211,7 +211,7 @@ public class AssignmentVerificationRepor
             "because of " + e);
       }
     }
-
+    
     float dispersionScoreSummary = 0;
     float dispersionNumSummary = 0;
     // Calculate the secondary score for each primary region server
@@ -219,7 +219,7 @@ public class AssignmentVerificationRepor
       primaryRSToRegionCounterMap.entrySet()) {
       HServerAddress primaryRS = entry.getKey();
       Integer regionsOnPrimary = entry.getValue();
-
+      
       // Process the dispersion number and score
       float dispersionScore = 0;
       int dispersionNum = 0;
@@ -237,7 +237,7 @@ public class AssignmentVerificationRepor
       } else if (dispersionScore == this.maxDispersionScore) {
         this.maxDispersionScoreServerSet.add(primaryRS);
       }
-
+      
       // Update the max dispersion num
       if (dispersionNum > this.maxDispersionNum) {
         this.maxDispersionNumServerSet.clear();
@@ -246,7 +246,7 @@ public class AssignmentVerificationRepor
       } else if (dispersionNum == this.maxDispersionNum) {
         this.maxDispersionNumServerSet.add(primaryRS);
       }
-
+      
       // Update the min dispersion score
       if (dispersionScore < this.minDispersionScore) {
         this.minDispersionScoreServerSet.clear();
@@ -255,7 +255,7 @@ public class AssignmentVerificationRepor
       } else if (dispersionScore == this.minDispersionScore) {
         this.minDispersionScoreServerSet.add(primaryRS);
       }
-
+      
       // Update the min dispersion num
       if (dispersionNum < this.minDispersionNum) {
         this.minDispersionNumServerSet.clear();
@@ -264,11 +264,11 @@ public class AssignmentVerificationRepor
       } else if (dispersionNum == this.minDispersionNum) {
         this.minDispersionNumServerSet.add(primaryRS);
       }
-
+      
       dispersionScoreSummary += dispersionScore;
       dispersionNumSummary += dispersionNum;
     }
-
+    
     // Update the avg dispersion score
     if (primaryRSToRegionCounterMap.keySet().size() != 0) {
       this.avgDispersionScore = dispersionScoreSummary /
@@ -276,7 +276,7 @@ public class AssignmentVerificationRepor
       this.avgDispersionNum = dispersionNumSummary /
          (float) primaryRSToRegionCounterMap.keySet().size();
     }
-
+    
     // Fill up the most loaded and least loaded region server information
     for (Map.Entry<HServerAddress, Integer> entry :
       serverToHostingRegionCounterMap.entrySet()) {
@@ -534,7 +534,7 @@ public class AssignmentVerificationRepor
       if (isDetailMode) {
         printHServerAddressSet(maxDispersionNumServerSet);
       }
-
+      
       System.out.println(
           "\tAvg dispersion score: " + df.format(avgDispersionScore) +
           ";\tMax dispersion score: " + df.format(maxDispersionScore) +

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java Wed Mar 12 21:17:13 2014
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -141,11 +142,23 @@ abstract class BaseScanner extends Chore
     maintenanceScan();
   }
 
+  public static Result getOneResultFromScanner(HRegionInterface srvr,
+      long scannerId)
+          throws IOException{
+    Result values = null;
+    Result[] results = srvr.next(scannerId, 1);
+    values = (results == null || results.length == 0) ? null : results[0];
+    return values;
+  }
+
   /**
    * @param metaRegion Region to scan
    * @throws IOException
+   * @throws ExecutionException
+   * @throws InterruptedException
    */
-  protected void scanRegion(final MetaRegion metaRegion) throws IOException {
+  protected void scanRegion(final MetaRegion metaRegion) throws IOException,
+      InterruptedException, ExecutionException {
     HRegionInterface regionServer = null;
     long scannerId = -1L;
     LOG.info(Thread.currentThread().getName() + " scanning meta region " +
@@ -166,7 +179,8 @@ abstract class BaseScanner extends Chore
       s.setCacheBlocks(true);
       scannerId = regionServer.openScanner(metaRegion.getRegionName(), s);
       while (true) {
-        Result values = regionServer.next(scannerId);
+        Result values = BaseScanner.getOneResultFromScanner(
+            regionServer, scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
@@ -181,7 +195,6 @@ abstract class BaseScanner extends Chore
               HConstants.FAVOREDNODES_QUALIFIER);
           AssignmentManager assignmentManager =
             this.master.getRegionManager().getAssignmentManager();
-          
           if (favoredNodes != null) {
             // compare the update TS
             long updateTimeStamp = 
@@ -382,6 +395,7 @@ abstract class BaseScanner extends Chore
    * @return True, if parent row has marker for "daughter row verified present"
    * else, false (and will do fixup adding daughter if daughter not present).
    */
+  @SuppressWarnings("deprecation")
   private boolean verifyDaughterRowPresent(final Result rowContent,
       final byte [] daughter, final HRegionInterface srvr,
       final byte [] metaRegionName,
@@ -556,6 +570,7 @@ abstract class BaseScanner extends Chore
    * @param checkTwice should we check twice before adding a region to unassigned pool.
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   protected void checkAssigned(final HRegionInterface regionServer,
     final MetaRegion meta, HRegionInfo info,
     final String hostnameAndPort, final long startCode, boolean checkTwice)