Posted to commits@hbase.apache.org by li...@apache.org on 2014/03/12 22:17:20 UTC

svn commit: r1576909 [13/18] - in /hbase/branches/0.89-fb/src: ./ examples/thrift/ main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/avro/ main/java/org/apache/hadoop/hbase/avro/generated/ main/java/org/apache/hadoop/hbase/client/ ma...

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/HbaseConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/HbaseConstants.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/HbaseConstants.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/HbaseConstants.java Wed Mar 12 21:17:13 2014
@@ -6,35 +6,8 @@
  */
 package org.apache.hadoop.hbase.thrift.generated;
 
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 public class HbaseConstants {
 
   public static final long LATEST_TIMESTAMP = 9223372036854775807L;
 
-}
+}
\ No newline at end of file

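For reference, the regenerated HbaseConstants keeps only LATEST_TIMESTAMP; the literal 9223372036854775807L is simply Long.MAX_VALUE, matching HConstants.LATEST_TIMESTAMP on the Java client side. A minimal check (an illustrative sketch, not part of the commit):

    // Sketch: the Thrift-side constant equals Long.MAX_VALUE.
    public class LatestTimestampCheck {
      public static void main(String[] args) {
        System.out.println(9223372036854775807L == Long.MAX_VALUE); // true
      }
    }
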
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java Wed Mar 12 21:17:13 2014
@@ -125,7 +125,7 @@ public class IOError extends TException 
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.BACKOFF_TIME_MILLIS, new org.apache.thrift.meta_data.FieldMetaData("backoffTimeMillis", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
@@ -491,7 +491,7 @@ public class IOError extends TException 
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -499,7 +499,7 @@ public class IOError extends TException 
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.message = iprot.readString();
               struct.setMessageIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java Wed Mar 12 21:17:13 2014
@@ -112,7 +112,7 @@ public class IllegalArgument extends TEx
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IllegalArgument.class, metaDataMap);
@@ -326,7 +326,7 @@ public class IllegalArgument extends TEx
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -334,7 +334,7 @@ public class IllegalArgument extends TEx
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.message = iprot.readString();
               struct.setMessageIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java Wed Mar 12 21:17:13 2014
@@ -136,11 +136,11 @@ public class Mutation implements org.apa
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.IS_DELETE, new org.apache.thrift.meta_data.FieldMetaData("isDelete", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.IS_DELETE, new org.apache.thrift.meta_data.FieldMetaData("isDelete", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
     tmpMap.put(_Fields.WRITE_TO_WAL, new org.apache.thrift.meta_data.FieldMetaData("writeToWAL", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
@@ -672,7 +672,7 @@ public class Mutation implements org.apa
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -680,7 +680,7 @@ public class Mutation implements org.apa
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.isDelete = iprot.readBool();
               struct.setIsDeleteIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -688,7 +688,7 @@ public class Mutation implements org.apa
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.column = iprot.readBinary();
               struct.setColumnIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -696,7 +696,7 @@ public class Mutation implements org.apa
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.value = iprot.readBinary();
               struct.setValueIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java Wed Mar 12 21:17:13 2014
@@ -121,9 +121,9 @@ public class TCell implements org.apache
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Bytes")));
-    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCell.class, metaDataMap);
@@ -420,7 +420,7 @@ public class TCell implements org.apache
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -428,7 +428,7 @@ public class TCell implements org.apache
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.value = iprot.readBinary();
               struct.setValueIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -436,7 +436,7 @@ public class TCell implements org.apache
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.timestamp = iprot.readI64();
               struct.setTimestampIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java Wed Mar 12 21:17:13 2014
@@ -145,19 +145,19 @@ public class TRegionInfo implements org.
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.START_KEY, new org.apache.thrift.meta_data.FieldMetaData("startKey", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.START_KEY, new org.apache.thrift.meta_data.FieldMetaData("startKey", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.END_KEY, new org.apache.thrift.meta_data.FieldMetaData("endKey", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.END_KEY, new org.apache.thrift.meta_data.FieldMetaData("endKey", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE)));
-    tmpMap.put(_Fields.SERVER_NAME, new org.apache.thrift.meta_data.FieldMetaData("serverName", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.SERVER_NAME, new org.apache.thrift.meta_data.FieldMetaData("serverName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRegionInfo.class, metaDataMap);
@@ -849,7 +849,7 @@ public class TRegionInfo implements org.
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -857,7 +857,7 @@ public class TRegionInfo implements org.
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.startKey = iprot.readBinary();
               struct.setStartKeyIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -865,7 +865,7 @@ public class TRegionInfo implements org.
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.endKey = iprot.readBinary();
               struct.setEndKeyIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -873,7 +873,7 @@ public class TRegionInfo implements org.
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.id = iprot.readI64();
               struct.setIdIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -881,7 +881,7 @@ public class TRegionInfo implements org.
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.name = iprot.readBinary();
               struct.setNameIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -889,7 +889,7 @@ public class TRegionInfo implements org.
             if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) {
               struct.version = iprot.readByte();
               struct.setVersionIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -897,7 +897,7 @@ public class TRegionInfo implements org.
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.serverName = iprot.readBinary();
               struct.setServerNameIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -905,7 +905,7 @@ public class TRegionInfo implements org.
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.port = iprot.readI32();
               struct.setPortIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java Wed Mar 12 21:17:13 2014
@@ -116,11 +116,11 @@ public class TRowResult implements org.a
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT,
+    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , "Text"),
+    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , "Text"), 
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRowResult.class, metaDataMap);
@@ -442,7 +442,7 @@ public class TRowResult implements org.a
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -450,7 +450,7 @@ public class TRowResult implements org.a
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.row = iprot.readBinary();
               struct.setRowIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -471,7 +471,7 @@ public class TRowResult implements org.a
                 iprot.readMapEnd();
               }
               struct.setColumnsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java Wed Mar 12 21:17:13 2014
@@ -158,18 +158,18 @@ public class TScan implements org.apache
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.STOP_ROW, new org.apache.thrift.meta_data.FieldMetaData("stopRow", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.STOP_ROW, new org.apache.thrift.meta_data.FieldMetaData("stopRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
-    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , "Text"))));
-    tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.FILTER_STRING, new org.apache.thrift.meta_data.FieldMetaData("filterString", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.FILTER_STRING, new org.apache.thrift.meta_data.FieldMetaData("filterString", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
     tmpMap.put(_Fields.CACHING_BLOCKS_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("cachingBlocksEnabled", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
@@ -1006,7 +1006,7 @@ public class TScan implements org.apache
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -1014,7 +1014,7 @@ public class TScan implements org.apache
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.startRow = iprot.readBinary();
               struct.setStartRowIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1022,7 +1022,7 @@ public class TScan implements org.apache
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.stopRow = iprot.readBinary();
               struct.setStopRowIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1030,7 +1030,7 @@ public class TScan implements org.apache
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.timestamp = iprot.readI64();
               struct.setTimestampIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1048,7 +1048,7 @@ public class TScan implements org.apache
                 iprot.readListEnd();
               }
               struct.setColumnsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1056,7 +1056,7 @@ public class TScan implements org.apache
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.caching = iprot.readI32();
               struct.setCachingIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1064,7 +1064,7 @@ public class TScan implements org.apache
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.filterString = iprot.readBinary();
               struct.setFilterStringIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java Wed Mar 12 21:17:13 2014
@@ -44,11 +44,11 @@ import java.util.Random;
  * occasionally return a false positive, it will never return a false negative.
  * When creating the filter, the sender can choose its desired point in a
  * trade-off between the false positive rate and the size.
- *
+ * 
  * <p>
  * Originally inspired by <a href="http://www.one-lab.org">European Commission
  * One-Lab Project 034819</a>.
- *
+ * 
  * Bloom filters are very sensitive to the number of elements inserted into
  * them. For HBase, the number of entries depends on the size of the data stored
  * in the column. Currently the default region size is 256MB, so entry count ~=
@@ -56,22 +56,22 @@ import java.util.Random;
  * no efficient way to calculate the entry count after compactions. Therefore,
  * it is often easier to use a dynamic bloom filter that will add extra space
  * instead of allowing the error rate to grow.
- *
+ * 
  * ( http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey
  * .pdf )
- *
+ * 
  * m denotes the number of bits in the Bloom filter (bitSize) n denotes the
  * number of elements inserted into the Bloom filter (maxKeys) k represents the
  * number of hash functions used (nbHash) e represents the desired false
  * positive rate for the bloom (err)
- *
+ * 
  * If we fix the error rate (e) and know the number of entries, then the optimal
  * bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185)
- *
+ * 
  * The probability of false positives is minimized when k = m/n ln(2).
- *
+ * 
  * @see BloomFilter The general behavior of a filter
- *
+ * 
  * @see <a
  *      href="http://portal.acm.org/citation.cfm?id=362692&dl=ACM&coll=portal">
  *      Space/Time Trade-Offs in Hash Coding with Allowable Errors</a>
@@ -95,12 +95,12 @@ public class ByteBloomFilter implements 
   protected int maxKeys;
   /** Bloom bits */
   protected ByteBuffer bloom;
-
+  
   /** Record separator for the Bloom filter statistics human-readable string */
   public static final String STATS_RECORD_SEP = "; ";
 
   /**
-   * Used in computing the optimal Bloom filter size. This approximately equals
+   * Used in computing the optimal Bloom filter size. This approximately equals 
    * 0.480453.
    */
   public static final double LOG2_SQUARED = Math.log(2) * Math.log(2);
@@ -152,15 +152,15 @@ public class ByteBloomFilter implements 
    *         be an integer.
    */
   public static long computeBitSize(long maxKeys, double errorRate) {
-    return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED));
+    return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED)); 
   }
-
+  
   /**
    * The maximum number of keys we can put into a Bloom filter of a certain
    * size to maintain the given error rate, assuming the number of hash
    * functions is chosen optimally and does not even have to be an integer
    * (hence the "ideal" in the function name).
-   *
+   * 
    * @param bitSize
    * @param errorRate
    * @return maximum number of keys that can be inserted into the Bloom filter
@@ -172,7 +172,7 @@ public class ByteBloomFilter implements 
     // more keys in a Bloom filter than is allowed by the target error rate.
     return (long) (bitSize * (LOG2_SQUARED / -Math.log(errorRate)));
   }
-
+  
   /**
    * The maximum number of keys we can put into a Bloom filter of a certain
    * size to get the given error rate, with the given number of hash functions.
@@ -186,7 +186,7 @@ public class ByteBloomFilter implements 
    */
   public static long computeMaxKeys(long bitSize, double errorRate,
       int hashCount) {
-    return (long) (-bitSize * 1.0 / hashCount *
+    return (long) (-bitSize * 1.0 / hashCount * 
         Math.log(1 - Math.exp(Math.log(errorRate) / hashCount)));
   }
 
@@ -196,20 +196,20 @@ public class ByteBloomFilter implements 
    * this function changes as a Bloom filter is being populated. Used for
    * reporting the actual error rate of compound Bloom filters when writing
    * them out.
-   *
+   * 
    * @return error rate for this particular Bloom filter
    */
   public double actualErrorRate() {
     return actualErrorRate(keyCount, byteSize * 8, hashCount);
   }
-
+  
   /**
    * Computes the actual error rate for the given number of elements, number
-   * of bits, and number of hash functions. Taken directly from the
+   * of bits, and number of hash functions. Taken directly from the 
    * <a href=
    * "http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives"
    * > Wikipedia Bloom filter article</a>.
-   *
+   * 
    * @param maxKeys
    * @param bitSize
    * @param functionCount
@@ -253,11 +253,11 @@ public class ByteBloomFilter implements 
     this.hashType = hashType;
     this.hash = Hash.getInstance(hashType);
   }
-
+  
   /**
    * Determines & initializes bloom filter meta data from user config. Call
    * {@link #allocBloom()} to allocate bloom filter data.
-   *
+   * 
    * @param maxKeys Maximum expected number of keys that will be stored in this
    *          bloom
    * @param errorRate Desired false positive error rate. Lower rate = more
@@ -285,7 +285,7 @@ public class ByteBloomFilter implements 
 
   /**
    * Creates a Bloom filter of the given size.
-   *
+   * 
    * @param byteSizeHint the desired number of bytes for the Bloom filter bit
    *          array. Will be increased so that folding is possible.
    * @param errorRate target false positive rate of the Bloom filter
@@ -309,10 +309,10 @@ public class ByteBloomFilter implements 
 
     return bbf;
   }
-
+  
   /**
    * Creates another similar Bloom filter. Does not copy the actual bits, and
-   * sets the new filter's key count to zero.
+   * sets the new filter's key count to zero. 
    *
    * @return a Bloom filter with the same configuration as this
    */
@@ -420,7 +420,7 @@ public class ByteBloomFilter implements 
     int hash1 = hash.hash(buf, offset, length, 0);
     int hash2 = hash.hash(buf, offset, length, hash1);
     int bloomBitSize = bloomSize * 8;
-
+    
     if (randomGeneratorForTest == null) {
       // Production mode.
       for (int i = 0; i < hashCount; i++) {
@@ -591,7 +591,7 @@ public class ByteBloomFilter implements 
 
   public static void setFakeLookupMode(boolean enabled) {
     if (enabled) {
-      randomGeneratorForTest = new Random(283742987L);
+      randomGeneratorForTest = new Random(283742987L);  
     } else {
       randomGeneratorForTest = null;
     }
@@ -623,7 +623,7 @@ public class ByteBloomFilter implements 
 
   /**
    * A human-readable string with statistics for the given Bloom filter.
-   *
+   * 
    * @param bloomFilter the Bloom filter to output statistics for;
    * @return a string consisting of "&lt;key&gt;: &lt;value&gt;" parts
    *         separated by {@link #STATS_RECORD_SEP}.
@@ -636,7 +636,7 @@ public class ByteBloomFilter implements 
     sb.append("BloomSize: " + bloomFilter.getByteSize() + STATS_RECORD_SEP);
     sb.append("No of Keys in bloom: " + k + STATS_RECORD_SEP);
     sb.append("Max Keys for bloom: " + m);
-    if (m > 0) {
+    if (m > 0) { 
       sb.append(STATS_RECORD_SEP + "Percentage filled: "
           + NumberFormat.getPercentInstance().format(k * 1.0 / m));
     }

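The ByteBloomFilter javadoc above quotes the standard sizing math: for n keys and a target false-positive rate err, the optimal bit count is m = -n * ln(err) / ln(2)^2, and the false-positive rate is minimized at k = (m/n) * ln(2) hash functions. A worked example mirroring computeBitSize() (an illustrative sketch, not part of the commit):

    // Worked example of the Bloom sizing formulas from the javadoc above.
    public class BloomSizingExample {
      static final double LOG2_SQUARED = Math.log(2) * Math.log(2); // ~0.480453

      public static void main(String[] args) {
        long maxKeys = 1000000L;  // n: expected number of entries
        double errorRate = 0.01;  // err: target false-positive rate
        // m = -n * ln(err) / ln(2)^2
        long bitSize = (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED));
        // k = (m / n) * ln(2)
        long hashCount = Math.round((double) bitSize / maxKeys * Math.log(2));
        System.out.println(bitSize);   // ~9585059 bits, about 1.2 MB
        System.out.println(hashCount); // 7 hash functions
      }
    }

So at a 1% error rate a Bloom filter costs roughly 9.6 bits per key regardless of key size, which is why the javadoc stresses getting the entry count (maxKeys) right.
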
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java Wed Mar 12 21:17:13 2014
@@ -19,23 +19,30 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import com.facebook.nifty.header.protocol.TFacebookCompactProtocol;
+import com.facebook.swift.codec.ThriftCodec;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.thrift.HBaseNiftyThriftServer;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.WritableComparator;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TMemoryBuffer;
+import org.apache.thrift.transport.TMemoryInputTransport;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.math.BigInteger;
 import java.nio.ByteBuffer;
+import java.util.Arrays;
 import java.util.Comparator;
 import java.util.Iterator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.io.RawComparator;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.io.WritableUtils;
-
 /**
  * Utility class that handles byte arrays, conversions to/from other types,
  * comparisons, hash code generation, manufacturing keys for HashMaps or
@@ -94,6 +101,9 @@ public class Bytes {
   // SizeOf which uses java.lang.instrument says 24 bytes. (3 longs?)
   public static final int ESTIMATED_HEAP_TAX = 16;
 
+  final protected static char[] hexArray = "0123456789ABCDEF".toCharArray();
+
+
   /**
    * Byte array comparator class.
    */
@@ -257,6 +267,26 @@ public class Bytes {
   }
 
   /**
+   * Similar to {@link #toBytes(ByteBuffer)}, except return the underlying
+   * array. This is an optimization so that we don't create another copy of the
+   * underlying byte array, if possible.
+   *
+   * @param reuseUnderlyingArray
+   * @return
+   */
+  public static byte[] toBytes(ByteBuffer bb, boolean reuseUnderlyingArray) {
+    // Return the underlying the ByteBuffer, if we want to reuse it and it is
+    // possible to do so (The offset is 0, and the limit of the BB is equal
+    // to the length of the underlying array).
+    if (reuseUnderlyingArray &&
+      (bb.arrayOffset() == 0 && bb.limit() == bb.array().length)) {
+      return bb.array();
+    }
+    // Return a new byte array.
+    return toBytes(bb);
+  }
+
+  /**
    * Returns a new byte array, copied from the passed ByteBuffer. Starts from the current position
    * in the buffer and copies all the remaining bytes to the limit of the buffer.
    * @param bb A ByteBuffer
@@ -462,6 +492,27 @@ public class Bytes {
     }
   }
 
+
+  public static String bytesToHex(byte[] bytes, int offset, int length) {
+    char[] hexChars = new char[length * 2];
+    for (int j = 0; j < length; j++) {
+      int v = bytes[offset + j] & 0xFF;
+      hexChars[j * 2] = hexArray[v >>> 4];
+      hexChars[j * 2 + 1] = hexArray[v & 0x0F];
+    }
+    return new String(hexChars);
+  }
+
+  public static byte[] hexToBytes(String s) {
+    int len = s.length();
+    byte[] data = new byte[len / 2];
+    for (int i = 0; i < len; i += 2) {
+      data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4) + Character
+          .digit(s.charAt(i + 1), 16));
+    }
+    return data;
+  }
+
   /**
    * Convert a boolean to a byte array. True becomes -1
    * and false becomes 0.
@@ -1537,4 +1588,65 @@ public class Bytes {
     }
     return Bytes.compareTo(a, aOffs, aLen, b, bOffs, aLen) == 0;
   }
+
+  /**
+   * This is a utility method, that serializes a Swift annotated class' object
+   * into a byte array. This is equivalent to Writable.getBytes().
+   *
+   * @param t The object to be serialized.
+   * @param clazz The class of the object to be serialized
+   * @param <T>
+   * @return The byte array corresponding to the serialized object.
+   * @throws Exception
+   */
+  public static <T> byte[] writeThriftBytes(T t, Class<T> clazz)
+    throws Exception {
+    TMemoryBuffer buffer = writeThriftBytesAndGetBuffer(t, clazz);
+    return (buffer.getArray().length == buffer.length()) ? buffer.getArray()
+      : Arrays.copyOf(buffer.getArray(), buffer.length());
+   }
+
+  public static <T> String writeThriftBytesAndGetString(T t, Class<T> clazz)
+    throws Exception {
+    TMemoryBuffer buffer = writeThriftBytesAndGetBuffer(t, clazz);
+    return Bytes.bytesToHex(buffer.getArray(), 0, buffer.length());
+
+  }
+
+  /**
+   * @param t
+   * @param clazz
+   * @return
+   * @throws Exception
+   */
+  public static <T> TMemoryBuffer writeThriftBytesAndGetBuffer(T t,
+    Class<T> clazz) throws Exception {
+    ThriftCodec<T> codec =
+      HBaseNiftyThriftServer.THRIFT_CODEC_MANAGER.getCodec(clazz);
+    TMemoryBuffer buffer = new TMemoryBuffer(0);
+    //TODO: adela change this to be configurable in future
+    TProtocol protocol = new TFacebookCompactProtocol(buffer);
+    codec.write(t, protocol);
+    return buffer;
+  }
+
+  /**
+   * This is a utility method, that deserializes a Swift annotated class' object
+   * from a byte array. This is equivalent to Writable.getWritable().
+   *
+   * @param buff
+   * @param clazz
+   * @param <T>
+   * @return
+   * @throws Exception
+   */
+  public static <T> T readThriftBytes(byte[] buff, Class<T> clazz)
+    throws Exception {
+    ThriftCodec<T> codec =
+      HBaseNiftyThriftServer.THRIFT_CODEC_MANAGER.getCodec(clazz);
+    TMemoryInputTransport buffer = new TMemoryInputTransport(buff);
+    // TODO: adela change this to be configurable in future
+    TProtocol protocol = new TFacebookCompactProtocol(buffer);
+    return codec.read(protocol);
+  }
 }

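The Bytes.java hunks above add hex helpers and Swift/Thrift serialization utilities. bytesToHex() emits upper-case hex and hexToBytes() assumes an even-length string of valid hex digits, so the pair round-trips any byte array. A minimal usage sketch (illustrative, not part of the commit):

    // Round-trip a byte array through the new hex helpers in Bytes.
    import java.util.Arrays;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HexRoundTrip {
      public static void main(String[] args) {
        byte[] key = { (byte) 0xDE, (byte) 0xAD, (byte) 0xBE, (byte) 0xEF };
        String hex = Bytes.bytesToHex(key, 0, key.length);        // "DEADBEEF"
        byte[] back = Bytes.hexToBytes(hex);
        System.out.println(hex + " " + Arrays.equals(key, back)); // DEADBEEF true
      }
    }

Note also that the new toBytes(ByteBuffer, boolean) avoids a copy only when arrayOffset() == 0 and limit() == array().length; any other buffer falls back to the copying toBytes(ByteBuffer). The companion writeThriftBytes()/readThriftBytes() pair round-trips any Swift-annotated class the same way, writing TFacebookCompactProtocol over an in-memory transport.
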
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java Wed Mar 12 21:17:13 2014
@@ -168,9 +168,9 @@ public class CompoundBloomFilter extends
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append(ByteBloomFilter.formatStats(this));
-    sb.append(ByteBloomFilter.STATS_RECORD_SEP +
+    sb.append(ByteBloomFilter.STATS_RECORD_SEP + 
         "Number of chunks: " + numChunks);
-    sb.append(ByteBloomFilter.STATS_RECORD_SEP +
+    sb.append(ByteBloomFilter.STATS_RECORD_SEP + 
         "Comparator: " + comparator.getClass().getSimpleName());
     return sb.toString();
   }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java Wed Mar 12 21:17:13 2014
@@ -28,7 +28,7 @@ public class CompoundBloomFilterBase imp
 
   /** Hash function type to use, as defined in {@link Hash} */
   protected int hashType;
-
+  
   /** Comparator used to compare Bloom filter keys */
   protected RawComparator<byte[]> comparator;
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Wed Mar 12 21:17:13 2014
@@ -20,10 +20,8 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.DataInputStream;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.lang.reflect.InvocationTargetException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.HashMap;
@@ -48,9 +46,7 @@ import org.apache.hadoop.hbase.RemoteExc
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.SequenceFile;
 
@@ -118,7 +114,7 @@ public class FSUtils {
 
   /**
    * Checks to see if the specified file system is available
-   *
+   * 
    * @param fs
    *          filesystem
    * @param shutdown
@@ -632,60 +628,14 @@ public class FSUtils {
     if (!(fs instanceof DistributedFileSystem)) {
       return true;
     }
-    long startWaiting = System.currentTimeMillis();
-
+    DistributedFileSystem dfs = (DistributedFileSystem)fs;
     boolean discardlastBlock =  conf.getBoolean("hbase.regionserver.discardLastNonExistantBlock",
                                                  true);
     LOG.info("Recovering file " + p + ", discard last block: "
         + discardlastBlock);
 
-    try {
-      try {
-        if (fs instanceof DistributedFileSystem) {
-          DistributedFileSystem dfs = (DistributedFileSystem)fs;
-          try {
-            return (Boolean) DistributedFileSystem.class.getMethod("recoverLease",
-                new Class[] {Path.class, Boolean.class}).
-                invoke(dfs, p, new Boolean(discardlastBlock));
-          } catch (NoSuchMethodException nsme) {
-            return (Boolean) DistributedFileSystem.class.getMethod("recoverLease",
-                new Class[] {Path.class}).invoke(dfs, p);
-          }
-        } else {
-          throw new Exception("Not a DistributedFileSystem");
-        }
-      } catch (InvocationTargetException ite) {
-        // function was properly called, but threw it's own exception
-        throw (IOException) ite.getCause();
-      } catch (Exception e) {
-        LOG.debug("Failed fs.recoverLease invocation, " + e.toString() +
-            ", trying fs.append instead");
-        FSDataOutputStream out = fs.append(p);
-        out.close();
-      }
-      return true;
-    } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
-      if (e instanceof AlreadyBeingCreatedException) {
-        // We expect that we'll get this message while the lease is still
-        // within its soft limit, but if we get it past that, it means
-        // that the RS is holding onto the file even though it lost its
-        // znode. We could potentially abort after some time here.
-        long waitedFor = System.currentTimeMillis() - startWaiting;
-        if (waitedFor > FSConstants.LEASE_SOFTLIMIT_PERIOD) {
-          LOG.warn("Waited " + waitedFor + "ms for lease recovery on " + p +
-              ":" + e.getMessage());
-        }
-      } else if (e instanceof LeaseExpiredException &&
-          e.getMessage().contains("File does not exist")) {
-        // This exception comes out instead of FNFE, fix it
-        throw new FileNotFoundException(
-            "The given HLog wasn't found at " + p.toString());
-      } else {
-        throw new IOException("Failed to open " + p + " for append", e);
-      }
-    }
-    return false;
+    // Trying recovery
+    return dfs.recoverLease(p, discardlastBlock);
   }
 
   /*
@@ -709,8 +659,8 @@ public class FSUtils {
 
 
   /**
-   * Runs through the HBase rootdir and creates a reverse lookup map for
-   * table StoreFile names to the full Path.
+   * Runs through the HBase rootdir and creates a reverse lookup map for 
+   * table StoreFile names to the full Path. 
    * <br>
    * Example...<br>
    * Key = 3944417774205889744  <br>
@@ -725,17 +675,17 @@ public class FSUtils {
     final FileSystem fs, final Path hbaseRootDir)
   throws IOException {
     Map<String, Path> map = new HashMap<String, Path>();
-
-    // if this method looks similar to 'getTableFragmentation' that is because
+    
+    // if this method looks similar to 'getTableFragmentation' that is because 
     // it was borrowed from it.
-
+    
     DirFilter df = new DirFilter(fs);
     // presumes any directory under hbase.rootdir is a table
     FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
     for (FileStatus tableDir : tableDirs) {
       // Skip the .log directory.  All others should be tables.  Inside a table,
       // there are compaction.dir directories to skip.  Otherwise, all else
-      // should be regions.
+      // should be regions. 
       Path d = tableDir.getPath();
       if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
         continue;
@@ -757,7 +707,7 @@ public class FSUtils {
             Path sf = sfStatus.getPath();
             map.put( sf.getName(), sf);
           }
-
+          
         }
       }
     }
@@ -767,7 +717,7 @@ public class FSUtils {
   /**
    * This function is to scan the root path of the file system to get the
    * mapping between the region name and its best locality region server
-   *
+   * 
    * @param fs
    *          the file system to use
    * @param rootPath
@@ -790,7 +740,7 @@ public class FSUtils {
   /**
    * This function is to scan the root path of the file system to get the
    * mapping between the region name and its best locality region server
-   *
+   * 
    * @param fs
    *          the file system to use
    * @param rootPath
@@ -832,7 +782,7 @@ public class FSUtils {
     return getRegionDegreeLocalityMappingFromFS(
         conf, null,
         conf.getInt("hbase.client.localityCheck.threadPoolSize", 2));
-
+     
   }
 
   /**
@@ -854,7 +804,7 @@ public class FSUtils {
     return getRegionDegreeLocalityMappingFromFS(
         conf, tableName,
         conf.getInt("hbase.client.localityCheck.threadPoolSize", 2));
-
+     
   }
 
   /**
@@ -1006,7 +956,7 @@ public class FSUtils {
         throw new IOException(e);
       }
     }
-
+    
     long overhead = System.currentTimeMillis() - startTime;
     String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms";
 

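The FSUtils hunk above drops the reflection-based lease recovery (which fell back to fs.append() and special-cased AlreadyBeingCreatedException and LeaseExpiredException) in favor of a direct dfs.recoverLease(p, discardlastBlock) call, so the method can now return false rather than retrying internally. A hypothetical caller sketch, assuming the enclosing method is the usual recoverFileLease(fs, p, conf) returning boolean (the hunk does not show its signature):

    // Hypothetical retry loop around the simplified lease recovery.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class LeaseRecoveryWait {
      static void waitForLeaseRecovery(FileSystem fs, Path p, Configuration conf)
          throws IOException, InterruptedException {
        // recoverFileLease() now delegates to dfs.recoverLease(p, discardlastBlock).
        while (!FSUtils.recoverFileLease(fs, p, conf)) {
          Thread.sleep(1000); // back off before asking the NameNode again
        }
      }
    }
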
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Hash.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Hash.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Hash.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Hash.java Wed Mar 12 21:17:13 2014
@@ -128,7 +128,7 @@ public abstract class Hash {
   }
 
   /**
-   * Calculate a hash using bytes from <code>offset</code> to <code>offset +
+   * Calculate a hash using bytes from <code>offset</code> to <code>offset + 
    * length</code>, and the provided seed value.
    * @param bytes input bytes
    * @param offset the offset into the array to start consideration

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Histogram.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Histogram.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Histogram.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Histogram.java Wed Mar 12 21:17:13 2014
@@ -17,17 +17,14 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import org.apache.hadoop.hbase.regionserver.metrics.PercentileMetric;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.regionserver.metrics.PercentileMetric;
 
 /**
  * The Histogram class provides a mechanism to sample data points and perform
@@ -172,7 +169,7 @@ public class Histogram {
     double ret = 0.0;
     this.lock.writeLock().lock();
     try {
-      if (underloadSampleList.size() == 0) {
+      if (underloadSampleList.isEmpty()) {
         LOG.warn("Too few data points. Consider increasing the sampling time.");
         return ret;
       } else if (underload) {

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/IdLock.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/IdLock.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/IdLock.java Wed Mar 12 21:17:13 2014
@@ -48,6 +48,7 @@ public class IdLock {
       this.id = id;
     }
 
+    @Override
     public String toString() {
       return "id=" + id + ", numWaiter=" + numWaiters + ", isLocked="
           + isLocked;
@@ -114,7 +115,7 @@ public class IdLock {
 
   /** For testing */
   void assertMapEmpty() {
-    assert map.size() == 0;
+    assert map.isEmpty();
   }
 
 }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java Wed Mar 12 21:17:13 2014
@@ -50,7 +50,7 @@ public class InfoServer extends HttpServ
    * increment by 1 until it finds a free port.
    * @throws IOException e
    */
-  public InfoServer(String name, String bindAddress, int port,
+  public InfoServer(String name, String bindAddress, int port, 
       boolean findPort, Configuration conf) throws IOException {
     super(name, bindAddress, port, findPort, conf);
     webServer.addHandler(new ContextHandlerCollection());
@@ -82,7 +82,7 @@ public class InfoServer extends HttpServ
       defaultContexts.put(logContext, true);
     }
     // Now bring up the task monitor
-    WebAppContext taskMonitorContext =
+    WebAppContext taskMonitorContext = 
       new WebAppContext(parent, "taskmonitor", "/taskmonitor");
     taskMonitorContext.addServlet(DefaultServlet.class, "/");
     taskMonitorContext.setWar(appDir + "/taskmonitor");

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java Wed Mar 12 21:17:13 2014
@@ -87,6 +87,7 @@ public class JVMClusterUtil {
       HRegionServer server;
       try {
         server = hrsc.getConstructor(Configuration.class).newInstance(c);
+        server.initialize();
       } catch (Exception e) {
         IOException ioe = new IOException();
         ioe.initCause(e);
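
The new server.initialize() call separates cheap construction from the heavier startup work, so failures surface when the cluster is launched rather than on first use. A sketch of that construct-then-initialize pattern with stand-in names (DemoServer and Launcher are hypothetical; the real code reflectively builds an HRegionServer):

  import org.apache.hadoop.conf.Configuration;

  class DemoServer {
    public DemoServer(Configuration c) { /* cheap: store fields only */ }
    public void initialize() { /* heavy: threads, RPC, connections */ }
  }

  class Launcher {
    static DemoServer launch(Class<? extends DemoServer> cls, Configuration c)
        throws java.io.IOException {
      try {
        DemoServer s = cls.getConstructor(Configuration.class).newInstance(c);
        s.initialize(); // fail fast at startup instead of on first use
        return s;
      } catch (Exception e) {
        java.io.IOException ioe = new java.io.IOException();
        ioe.initCause(e); // same wrapping as the hunk above
        throw ioe;
      }
    }
  }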

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java Wed Mar 12 21:17:13 2014
@@ -42,7 +42,7 @@ public class MD5Hash {
   public static String getMD5AsHex(byte[] key) {
     return getMD5AsHex(key, 0, key.length);
   }
-
+  
   /**
    * Given a byte array, returns its MD5 hash as a hex string.
    * Only "length" number of bytes starting at "offset" within the
@@ -50,7 +50,7 @@ public class MD5Hash {
    *
    * @param key the key to hash (variable length byte array)
    * @param offset
-   * @param length
+   * @param length 
    * @return MD5 hash as a 32 character hex string.
    */
   public static String getMD5AsHex(byte[] key, int offset, int length) {
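
For reference, the ranged behavior this javadoc describes can be reproduced with nothing but the JDK; a stand-alone sketch (not the class's actual body):

  import java.security.MessageDigest;
  import java.security.NoSuchAlgorithmException;

  class Md5Demo {
    // Hash only "length" bytes starting at "offset", per the javadoc above.
    static String md5Hex(byte[] key, int offset, int length)
        throws NoSuchAlgorithmException {
      MessageDigest md = MessageDigest.getInstance("MD5");
      md.update(key, offset, length);
      StringBuilder sb = new StringBuilder(32);
      for (byte b : md.digest()) {
        sb.append(String.format("%02x", b)); // 32-character hex string
      }
      return sb.toString();
    }
  }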

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Pair.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Pair.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Pair.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Pair.java Wed Mar 12 21:17:13 2014
@@ -50,7 +50,7 @@ public class Pair<T1, T2> implements Ser
     this.first = a;
     this.second = b;
   }
-
+  
   /**
    * Constructs a new pair, inferring the type via the passed arguments
    * @param <T1> type for first
@@ -62,7 +62,7 @@ public class Pair<T1, T2> implements Ser
   public static <T1,T2> Pair<T1,T2> newPair(T1 a, T2 b) {
     return new Pair<T1,T2>(a, b);
   }
-
+  
   /**
    * Replace the first element of the pair.
    * @param a operand
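
The newPair factory shown above exists so pre-Java-7 callers get type inference from the arguments instead of spelling the type parameters out twice; hypothetical usage:

  Pair<String, Integer> p = Pair.newPair("rowCount", 42);
  // equivalent to: new Pair<String, Integer>("rowCount", 42)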

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ParamFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ParamFormat.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ParamFormat.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ParamFormat.java Wed Mar 12 21:17:13 2014
@@ -42,3 +42,4 @@ public @interface ParamFormat {
    */
   FormatTypes formatType() default FormatTypes.DEFAULT;
 }
+

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java Wed Mar 12 21:17:13 2014
@@ -13,7 +13,7 @@ public class RetryCounter {
   private final int retryIntervalMillis;
   private final TimeUnit timeUnit;
   
-  public RetryCounter(int maxRetries,
+  public RetryCounter(int maxRetries, 
   int retryIntervalMillis, TimeUnit timeUnit) {
     this.maxRetries = maxRetries;
     this.retriesRemaining = maxRetries;
@@ -44,7 +44,7 @@ public class RetryCounter {
   public void useRetry() {
     retriesRemaining--;
   }
-
+  
   public int getAttemptTimes() {
     return maxRetries-retriesRemaining+1;
   }
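
A hedged usage sketch for this counter, restricted to members visible in this commit (the constructor and useRetry here; shouldRetry and getMaxRetries appear in the RecoverableZooKeeper hunks below); the flaky operation is a made-up stand-in:

  import java.util.concurrent.TimeUnit;

  class RetryDemo {
    static void runWithRetries(Runnable flakyOperation) throws InterruptedException {
      RetryCounter counter = new RetryCounter(3, 1000, TimeUnit.MILLISECONDS);
      while (true) {
        try {
          flakyOperation.run();
          return; // success
        } catch (RuntimeException e) {
          if (!counter.shouldRetry()) {
            throw e; // retries exhausted
          }
          counter.useRetry(); // consume one retry, then back off
          Thread.sleep(1000);
        }
      }
    }
  }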

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RollingRestart.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RollingRestart.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RollingRestart.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RollingRestart.java Wed Mar 12 21:17:13 2014
@@ -20,6 +20,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.MasterNotRunningException;
@@ -29,7 +30,6 @@ import org.apache.hadoop.hbase.client.HT
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.master.AssignmentPlan;
 import org.apache.hadoop.hbase.master.RegionPlacement;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
@@ -51,6 +51,7 @@ public class RollingRestart {
   int moveTimeoutInterval = 60000;
   int moveRetries = 1;
   boolean useHadoopCtl = true;
+  private int port = HConstants.DEFAULT_REGIONSERVER_PORT;
   HashMap<HServerAddress, HRegionInterface> serverConnectionMap =
       new HashMap<HServerAddress, HRegionInterface>();
   ArrayList<RegionChecker> regionCheckers = new ArrayList<RegionChecker>();
@@ -66,7 +67,7 @@ public class RollingRestart {
   RollingRestart(String serverName, int regionDrainInterval,
       int regionUndrainInterval, int sleepIntervalAfterRestart,
       int sleepIntervalBeforeRestart, int getOpFrequency,
-      boolean useHadoopCtl) throws IOException {
+      boolean useHadoopCtl, int port) throws IOException {
 
     this.sleepIntervalAfterRestart = sleepIntervalAfterRestart;
     this.sleepIntervalBeforeRestart = sleepIntervalBeforeRestart;
@@ -74,6 +75,7 @@ public class RollingRestart {
     this.regionDrainInterval = regionDrainInterval;
     this.regionUndrainInterval = regionUndrainInterval;
     this.getOpFrequency = getOpFrequency;
+    this.port = port;
 
     conf = HBaseConfiguration.create();
     this.moveRetries = conf.getInt("hbase.rollingrestart.move.maxretries", DEFAULT_MOVE_RETRIES);
@@ -85,7 +87,7 @@ public class RollingRestart {
       currentState = STAGE.FAIL;
       return;
     }
-    this.serverAddr = new HServerAddress(serverName, 60020);
+    this.serverAddr = new HServerAddress(serverName, this.port);
 
     currentState = STAGE.SETUP;
   }
@@ -240,6 +242,9 @@ public class RollingRestart {
           break;
         }
      } catch (Exception e) {
+       if (LOG.isDebugEnabled()) {
+         e.printStackTrace();
+       }
        System.out.println("Waiting for region server to come online.");
        Thread.sleep(1000);
      }
@@ -458,6 +463,8 @@ public class RollingRestart {
 
     options.addOption("s", "server", true,
         "Name of the region server to restart");
+    options.addOption("p", "port", true,
+        "Port where the regionserver is listening");
     options.addOption("r", "sleep_after_restart", true,
         "time interval after which the region server should be started assigning regions. Default : 10000ms");
     options.addOption("b", "sleep_before_restart", true,
@@ -474,6 +481,8 @@ public class RollingRestart {
         "Don't use hadoopctl to restart the regionserver. Default : true");
     options.addOption("o", "drain_and_stop_only", false,
       "Drain and stop the region server(Works only with hadoopctl). Default : false");
+    options.addOption("drain", "drain_only", false,
+        "Drain the region server(Works only with hadoopctl). Default : false");
 
     if (args.length == 0) {
       HelpFormatter formatter = new HelpFormatter();
@@ -492,6 +501,8 @@ public class RollingRestart {
     int sleepIntervalBeforeRestart = RollingRestart.DEFAULT_SLEEP_BEFORE_RESTART_INTERVAL;
     boolean useHadoopCtl = true;
     boolean drainAndStopOnly = false;
+    boolean drainOnly = false;
+    int port = HConstants.DEFAULT_REGIONSERVER_PORT;
 
     if (!cmd.hasOption("s")) {
       HelpFormatter formatter = new HelpFormatter();
@@ -529,11 +540,19 @@ public class RollingRestart {
       getOpFrequency = Integer.parseInt(cmd.getOptionValue("g"));
     }
 
+    if (cmd.hasOption("p")) {
+      port = Integer.parseInt(cmd.getOptionValue("p"));
+    }
+
+    if (cmd.hasOption("drain")) {
+      drainOnly = true;
+    }
+
     RollingRestart rr = null;
     try {
       rr = new RollingRestart(serverName, regionDrainInterval,
           regionUndrainInterval, sleepIntervalAfterRestart,
-          sleepIntervalBeforeRestart, getOpFrequency, useHadoopCtl);
+          sleepIntervalBeforeRestart, getOpFrequency, useHadoopCtl, port);
     } catch (IOException e) {
       e.printStackTrace();
       LOG.error("Rolling restart failed for " + serverName);
@@ -551,6 +570,10 @@ public class RollingRestart {
     try  {
       rr.setup();
       rr.drainServer();
+      if (drainOnly) {
+        LOG.info("Drain completed for " + serverName);
+        return;
+      }
       rr.restart(drainAndStopOnly);
       if (!drainAndStopOnly) {
         rr.undrainServer();
@@ -581,7 +604,7 @@ public class RollingRestart {
          default:
        }
     } finally {
-      rr.clear(drainAndStopOnly);
+      rr.clear(drainOnly || drainAndStopOnly);
     }
   }
 }
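
The -p/--port and -drain/--drain_only flags above ride the tool's existing commons-cli plumbing. A self-contained sketch of just that parsing (option names copied from the diff; everything else simplified):

  import org.apache.commons.cli.CommandLine;
  import org.apache.commons.cli.GnuParser;
  import org.apache.commons.cli.Options;

  class FlagDemo {
    public static void main(String[] args) throws Exception {
      Options options = new Options();
      options.addOption("p", "port", true,
          "Port where the regionserver is listening");
      options.addOption("drain", "drain_only", false,
          "Drain the region server (works only with hadoopctl)");

      CommandLine cmd = new GnuParser().parse(options, args);
      // 60020 is the value of HConstants.DEFAULT_REGIONSERVER_PORT
      int port = cmd.hasOption("p")
          ? Integer.parseInt(cmd.getOptionValue("p")) : 60020;
      boolean drainOnly = cmd.hasOption("drain");
      System.out.println("port=" + port + ", drainOnly=" + drainOnly);
    }
  }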

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java Wed Mar 12 21:17:13 2014
@@ -219,9 +219,9 @@ public class HQuorumPeer {
     LOG.trace("Created ZK properties: " + zkProperties);
     return zkProperties;
   }
-
+  
   /**
-   * Return the ZK Quorum servers string given zk properties returned by
+   * Return the ZK Quorum servers string given zk properties returned by 
    * makeZKProps
    * @param properties
    * @return

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java Wed Mar 12 21:17:13 2014
@@ -177,7 +177,7 @@ public class RecoverableZooKeeper {
             LOG.warn("Possibly transient ZooKeeper exception: " + e);
             if (!retryCounter.shouldRetry()) {
               LOG.error("ZooKeeper exists failed after "
-                + retryCounter.getMaxRetries() + " retries");
+                + retryCounter.getMaxRetries() + " retries, " + path);
               throw e;
             }
             break;
@@ -461,7 +461,7 @@ public class RecoverableZooKeeper {
               throw e;
             }
             LOG.error("Node " + path + " already exists and this is not a " +
-            		"retry");
+                "retry");
             throw e;
 
           case CONNECTIONLOSS:
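
Both hunks above sit inside a bounded retry loop around a ZooKeeper call; the first change adds the znode path to the give-up message so the failing node is identifiable in logs. A simplified sketch of that shape (the wiring is assumed, not copied from the real class):

  import org.apache.zookeeper.KeeperException;
  import org.apache.zookeeper.ZooKeeper;
  import org.apache.zookeeper.data.Stat;

  class ZkRetryDemo {
    static Stat existsWithRetries(ZooKeeper zk, String path, RetryCounter counter)
        throws KeeperException, InterruptedException {
      while (true) {
        try {
          return zk.exists(path, false); // a null Stat means the znode is absent
        } catch (KeeperException.ConnectionLossException e) {
          if (!counter.shouldRetry()) {
            // include the path, as the change above does, so logs name the znode
            System.err.println("ZooKeeper exists failed after "
                + counter.getMaxRetries() + " retries, " + path);
            throw e;
          }
          counter.useRetry();
          Thread.sleep(1000);
        }
      }
    }
  }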

Modified: hbase/branches/0.89-fb/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html (original)
+++ hbase/branches/0.89-fb/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html Wed Mar 12 21:17:13 2014
@@ -40,7 +40,7 @@ in the <code>contrib</code> directory.
 </p>
 
 <p>The {@link org.apache.hadoop.hbase.thrift.generated.Hbase.Iface HBase API} is defined in the
-file <a href="doc-files/index.html">Hbase.thrift</a> (Click the former to see the
+file <a href="doc-files/index.html">LegacyHBase.thrift</a> (click the link to see the
 Thrift-generated documentation of the interface). A server-side implementation of the API is in
 {@link org.apache.hadoop.hbase.thrift.ThriftServer}. The generated interfaces,
 types, and RPC utility files reside in the
@@ -94,7 +94,7 @@ for more details on this issue.
 
 <p>The files were generated by running the commands:
 <pre>
-  thrift -strict --gen java:hashcode Hbase.thrift
+  thrift -strict --gen java:hashcode LegacyHBase.thrift
   mv gen-java/org/apache/hadoop/hbase/thrift/generated .
   rm -rf gen-java
 </pre>

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/assignmentPlan.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/assignmentPlan.jsp?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/assignmentPlan.jsp (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/assignmentPlan.jsp Wed Mar 12 21:17:13 2014
@@ -15,7 +15,7 @@
   import="org.apache.hadoop.hbase.HRegionInfo"
   import="org.apache.hadoop.hbase.master.RegionPlacement"
   import="org.apache.hadoop.hbase.master.RegionAssignmentSnapshot"
-  import="org.apache.hadoop.hbase.master.AssignmentPlan"
+  import="org.apache.hadoop.hbase.master.AssignmentPlan" 
 %><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   Configuration conf = master.getConfiguration();
@@ -37,14 +37,14 @@
 		}	else {
 			primaryRS= request.getParameter("primaryRS");
 			favoredNodes += primaryRS + ",";
-
+		
 			secondaryRS= request.getParameter("secondaryRS");
 			favoredNodes += secondaryRS + ",";
-
+		
 			tertiaryRS= request.getParameter("tertiaryRS");
 			favoredNodes += tertiaryRS;
 			if (primaryRS == null || primaryRS.length() == 0 ||
-				secondaryRS == null || secondaryRS.length() == 0 ||
+				secondaryRS == null || secondaryRS.length() == 0 || 
 				tertiaryRS == null || tertiaryRS.length() == 0 )  {
 				error = " because the favored nodes are incomplete : " + favoredNodes;
 			} else {
@@ -59,8 +59,8 @@
 					try {
 						rp.updateAssignmentPlan(newPlan);
 					} catch (Exception e) {
-						error = " because caught some exceptions during the update " + e.toString();
-					}
+						error = " because caught some exceptions during the update " + e.toString(); 
+					} 
 				}
 			}
 		}
@@ -71,8 +71,8 @@
   Map<HRegionInfo, HServerAddress> currentAssignment = snapShot.getRegionToRegionServerMap();
 
 %><?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
-  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
 <html xmlns="http://www.w3.org/1999/xhtml">
 	<head>
 		<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
@@ -97,12 +97,12 @@
 %>
 				<h3> Succeeded to update the favored nodes: <font color="#32CD32"><%=favoredNodes%> </font>,</h3>
 				<h3> for the region: <font color="#32CD32"><%=regionName%> </font></h3>
-<%
+<%			
 			}
 %>
 		<table>
 			<tr>
-			<th>Region Name</th> <th>Position</th> <th>Primary RS</th> <th>Secondary RS</th> <th>Tertiary RS</th>  <th>Actions</th>
+    			<th>Region Name</th> <th>Position</th> <th>Primary RS</th> <th>Secondary RS</th> <th>Tertiary RS</th>  <th>Actions</th>
 			</tr>
 <%				
 					for (Map.Entry<String, HRegionInfo> entry : regionNameToRegionInfoMap.entrySet()) {
@@ -122,21 +122,21 @@
 			<tr>
 				<form method="post">
 					<input type="hidden" name="regionName" value="<%= regionName %>">
-				<td><%=regionName%> </td>
-					<%
+    				<td><%=regionName%> </td>
+					<%			
 					  if (position.startsWith("Not")) {
 					%>
 							<td><b><font color="#FF0000"><%=position%></font></b></td>
-					<%
+					<%	
 					} else if (position.equalsIgnoreCase(AssignmentPlan.POSITION.PRIMARY.toString())){
 					%>
 							<td><b><font color="#32CD32"><%=position%></font></b></td>
-					<%
+					<%	
 					} else {
 					%>
 							<td><b><font color="#FFD700"><%=position%></font></b></td>
-					<%
-					}
+					<%	
+					} 
 					%>
 					<td><input type="text" size="40" name="primaryRS" value="<%=favoredNodeList.get(AssignmentPlan.POSITION.PRIMARY.ordinal()).getHostNameWithPort()%>"</td>
 					<td><input type="text" size="40" name="secondaryRS" value="<%=favoredNodeList.get(AssignmentPlan.POSITION.SECONDARY.ordinal()).getHostNameWithPort()%>"</td>
@@ -144,8 +144,8 @@
 					<td><input type="submit" size="5" value="Update"></td>
 				</form>
 			</tr>
-<%
-				}
+<%   
+				}  
 %>
 		</table>
 	</body>

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/master.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/master.jsp?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/master.jsp (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/master.jsp Wed Mar 12 21:17:13 2014
@@ -30,8 +30,8 @@
       frags = master.getTableFragmentation();
   }
 %><?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
-  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
 <title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title>
@@ -84,7 +84,7 @@
 </table>
 
 <h2>Catalog Tables</h2>
-<%
+<% 
   if (rootLocation != null) { %>
 <table>
 <tr>
@@ -110,13 +110,13 @@
 <%  } %>
     <td>The .META. table holds references to all User Table regions</td>
 </tr>
-
+  
 <%  } %>
 </table>
 <%} %>
 
 <h2>User Tables</h2>
-<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables();
+<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables(); 
    if(tables != null && tables.length > 0) { %>
 <table>
 <tr>
@@ -143,7 +143,7 @@
 <h2>Region Servers</h2>
 <% if (serverToServerInfos != null && serverToServerInfos.size() > 0) { %>
 <%   int totalRegions = 0;
-     int totalRequests = 0;
+     int totalRequests = 0; 
 %>
 
 <table>

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/table.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/table.jsp?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/table.jsp (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/table.jsp Wed Mar 12 21:17:13 2014
@@ -8,7 +8,7 @@
   import="org.apache.hadoop.hbase.HServerAddress"
   import="org.apache.hadoop.hbase.HServerInfo"
   import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
-  import="org.apache.hadoop.hbase.master.HMaster"
+  import="org.apache.hadoop.hbase.master.HMaster" 
   import="org.apache.hadoop.hbase.master.MetaRegion"
   import="org.apache.hadoop.hbase.util.Bytes"
   import="java.util.Map"
@@ -29,8 +29,8 @@
 %>
 
 <?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
-  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
 <html xmlns="http://www.w3.org/1999/xhtml">
 
 <%

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/tablesDetailed.jsp?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/tablesDetailed.jsp (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/tablesDetailed.jsp Wed Mar 12 21:17:13 2014
@@ -19,7 +19,7 @@
 <body>
 
 <h2>User Tables</h2>
-<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables();
+<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables(); 
    if(tables != null && tables.length > 0) { %>
 <table>
 <tr>

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/zk.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/zk.jsp?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/zk.jsp (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/zk.jsp Wed Mar 12 21:17:13 2014
@@ -6,15 +6,15 @@
   import="org.apache.hadoop.hbase.HRegionInfo"
   import="org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper"
   import="org.apache.hadoop.hbase.HBaseConfiguration"
-  import="org.apache.hadoop.hbase.master.HMaster"
+  import="org.apache.hadoop.hbase.master.HMaster" 
   import="org.apache.hadoop.hbase.HConstants"%><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   ZooKeeperWrapper wrapper = master.getZooKeeperWrapper();
 %>
 
 <?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
-  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
 <title>ZooKeeper Dump</title>

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-webapps/regionserver/regionserver.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-webapps/regionserver/regionserver.jsp?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-webapps/regionserver/regionserver.jsp (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-webapps/regionserver/regionserver.jsp Wed Mar 12 21:17:13 2014
@@ -27,8 +27,8 @@
   int interval = regionServer.getConfiguration().getInt("hbase.regionserver.msginterval", 3000)/1000;
 
 %><?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
-  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
 <title>HBase Region Server: <%= serverInfo.getServerAddress().getHostname() %>:<%= serverInfo.getServerAddress().getPort() %></title>
@@ -71,7 +71,7 @@
     <td><%= onlineRegionInfoAndOpenDate.get(r) %></td>
 </tr>
 <%
-     }
+     } 
 %>
 </table>
 <h2>Recently Closed Regions</h2>
@@ -101,7 +101,7 @@
 <p>Region names are made of the containing table's name, a comma,
 the start key, a comma, and a randomly generated region id.  To illustrate,
 the region named
-<em>domains,apache.org,5464829424211263407</em> is party to the table
+<em>domains,apache.org,5464829424211263407</em> is party to the table 
 <em>domains</em>, has an id of <em>5464829424211263407</em> and the first key
 in the region is <em>apache.org</em>.  The <em>-ROOT-</em>
 and <em>.META.</em> 'tables' are internal system tables (or 'catalog' tables in db-speak).

Modified: hbase/branches/0.89-fb/src/main/resources/hbase-webapps/taskmonitor/taskmonitor.jsp
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/resources/hbase-webapps/taskmonitor/taskmonitor.jsp?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/resources/hbase-webapps/taskmonitor/taskmonitor.jsp (original)
+++ hbase/branches/0.89-fb/src/main/resources/hbase-webapps/taskmonitor/taskmonitor.jsp Wed Mar 12 21:17:13 2014
@@ -22,11 +22,11 @@
       if (!(t instanceof MonitoredRPCHandler))
         iter.remove();
     } else if (filter.equals("rpc")) {
-      if (!(t instanceof MonitoredRPCHandler) ||
+      if (!(t instanceof MonitoredRPCHandler) || 
           !((MonitoredRPCHandler) t).isRPCRunning())
         iter.remove();
     } else if (filter.equals("operation")) {
-      if (!(t instanceof MonitoredRPCHandler) ||
+      if (!(t instanceof MonitoredRPCHandler) || 
           !((MonitoredRPCHandler) t).isOperationRunning())
         iter.remove();
     }
@@ -49,8 +49,8 @@
       %><%= "]" %><%
   } else {
 %><?xml version="1.0" encoding="UTF-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
-  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 
 <html xmlns="http://www.w3.org/1999/xhtml">
   <head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
   <title>Task Monitor</title>
@@ -87,7 +87,7 @@
         <td>ABORTED</td>
       </tr>
     </table>
-    <p>Each task's state is indicated by its background color according to the
+    <p>Each task's state is indicated by its background color according to the 
     key.</p>
     <table style="clear:right">
       <tr>
@@ -101,11 +101,11 @@
           <td><%= new Date(task.getStartTime()) %></td>
           <td><%= task.getDescription() %></td>
           <td><%= task.getState() %>
-              (since <%= StringUtils.formatTimeDiff(now,
+              (since <%= StringUtils.formatTimeDiff(now, 
                               task.getStateTime()) %> ago)
           </td>
           <td><%= task.getStatus() %>
-              (since <%= StringUtils.formatTimeDiff(now,
+              (since <%= StringUtils.formatTimeDiff(now, 
                               task.getStatusTime()) %> ago)
           </td>
         </tr>