Posted to commits@hbase.apache.org by jd...@apache.org on 2013/04/17 00:21:47 UTC

svn commit: r1468644 - in /hbase/branches/0.95/hbase-server/src/main: java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java java/org/apache/hadoop/hbase/thrift/generated/TScan.java resources/org/apache/hadoop/hbase/thrift/Hbase.thrift

Author: jdcryans
Date: Tue Apr 16 22:21:46 2013
New Revision: 1468644

URL: http://svn.apache.org/r1468644
Log:
HBASE-7828  Expose HBase Scan object's "batch" property for intra-row batching
            in Thrift API (Shivendra Pratap Singh via JD)
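
For context, a minimal sketch of how a thrift1 client could use the new field once this change is in place. This is illustrative only and not part of the commit: the host, port, table name, and the scannerOpenWithScan/scannerGetList signatures are assumptions based on the generated 0.95 bindings.

    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.List;

    import org.apache.hadoop.hbase.thrift.generated.Hbase;
    import org.apache.hadoop.hbase.thrift.generated.TRowResult;
    import org.apache.hadoop.hbase.thrift.generated.TScan;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class BatchedScanExample {
      public static void main(String[] args) throws Exception {
        // Assumed host/port of a running thrift1 ThriftServer.
        TTransport transport = new TSocket("localhost", 9090);
        transport.open();
        Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));

        TScan tScan = new TScan();
        tScan.setCaching(100);   // rows buffered per RPC
        tScan.setBatchSize(50);  // new field: cap on cells returned per row chunk

        int scannerId = client.scannerOpenWithScan(
            ByteBuffer.wrap("t1".getBytes("UTF-8")), tScan,
            new HashMap<ByteBuffer, ByteBuffer>());
        try {
          List<TRowResult> rows;
          while (!(rows = client.scannerGetList(scannerId, 10)).isEmpty()) {
            // Process rows; a wide row may arrive as several TRowResult
            // entries with the same row key, each holding at most 50 cells.
          }
        } finally {
          client.scannerClose(scannerId);
          transport.close();
        }
      }
    }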

Modified:
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
    hbase/branches/0.95/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java?rev=1468644&r1=1468643&r2=1468644&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java Tue Apr 16 22:21:46 2013
@@ -1161,6 +1161,9 @@ public class ThriftServerRunner implemen
         if (tScan.isSetCaching()) {
           scan.setCaching(tScan.getCaching());
         }
+        if (tScan.isSetBatchSize()) {
+          scan.setBatch(tScan.getBatchSize());
+        }
         if (tScan.isSetColumns() && tScan.getColumns().size() != 0) {
           for(ByteBuffer column : tScan.getColumns()) {
             byte [][] famQf = KeyValue.parseColumn(getBytes(column));
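
The new check above forwards tScan.getBatchSize() to the native Scan#setBatch, which caps how many cells of a single row come back in one Result; it is independent of Scan#setCaching, which caps how many rows are fetched per RPC. A short native-API fragment for comparison (illustrative values, not part of this commit):

    import org.apache.hadoop.hbase.client.Scan;

    // Illustrative values only.
    Scan scan = new Scan();
    scan.setCaching(100); // up to 100 rows per RPC round trip
    scan.setBatch(50);    // at most 50 cells of any one row per Result, so a
                          // very wide row is returned in chunks rather than
                          // materialized in a single Result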

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java?rev=1468644&r1=1468643&r2=1468644&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java Tue Apr 16 22:21:46 2013
@@ -42,6 +42,7 @@ public class TScan implements org.apache
   private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)4);
   private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC = new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.I32, (short)5);
   private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField BATCH_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("batchSize", org.apache.thrift.protocol.TType.I32, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,6 +56,7 @@ public class TScan implements org.apache
   public List<ByteBuffer> columns; // optional
   public int caching; // optional
   public ByteBuffer filterString; // optional
+  public int batchSize; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -63,7 +65,8 @@ public class TScan implements org.apache
     TIMESTAMP((short)3, "timestamp"),
     COLUMNS((short)4, "columns"),
     CACHING((short)5, "caching"),
-    FILTER_STRING((short)6, "filterString");
+    FILTER_STRING((short)6, "filterString"),
+    BATCH_SIZE((short)7, "batchSize");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -90,6 +93,8 @@ public class TScan implements org.apache
           return CACHING;
         case 6: // FILTER_STRING
           return FILTER_STRING;
+        case 7: // BATCH_SIZE
+          return BATCH_SIZE;
         default:
           return null;
       }
@@ -132,8 +137,9 @@ public class TScan implements org.apache
   // isset id assignments
   private static final int __TIMESTAMP_ISSET_ID = 0;
   private static final int __CACHING_ISSET_ID = 1;
+  private static final int __BATCHSIZE_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
-  private _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.TIMESTAMP,_Fields.COLUMNS,_Fields.CACHING,_Fields.FILTER_STRING};
+  private _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.TIMESTAMP,_Fields.COLUMNS,_Fields.CACHING,_Fields.FILTER_STRING,_Fields.BATCH_SIZE};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -150,6 +156,8 @@ public class TScan implements org.apache
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.FILTER_STRING, new org.apache.thrift.meta_data.FieldMetaData("filterString", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
+    tmpMap.put(_Fields.BATCH_SIZE, new org.apache.thrift.meta_data.FieldMetaData("batchSize", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap);
   }
@@ -180,6 +188,7 @@ public class TScan implements org.apache
     if (other.isSetFilterString()) {
       this.filterString = other.filterString;
     }
+    this.batchSize = other.batchSize;
   }
 
   public TScan deepCopy() {
@@ -196,6 +205,8 @@ public class TScan implements org.apache
     setCachingIsSet(false);
     this.caching = 0;
     this.filterString = null;
+    setBatchSizeIsSet(false);
+    this.batchSize = 0;
   }
 
   public byte[] getStartRow() {
@@ -385,6 +396,29 @@ public class TScan implements org.apache
     }
   }
 
+  public int getBatchSize() {
+    return this.batchSize;
+  }
+
+  public TScan setBatchSize(int batchSize) {
+    this.batchSize = batchSize;
+    setBatchSizeIsSet(true);
+    return this;
+  }
+
+  public void unsetBatchSize() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
+  }
+
+  /** Returns true if field batchSize is set (has been assigned a value) and false otherwise */
+  public boolean isSetBatchSize() {
+    return EncodingUtils.testBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
+  }
+
+  public void setBatchSizeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __BATCHSIZE_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case START_ROW:
@@ -435,6 +469,14 @@ public class TScan implements org.apache
       }
       break;
 
+    case BATCH_SIZE:
+      if (value == null) {
+        unsetBatchSize();
+      } else {
+        setBatchSize((Integer)value);
+      }
+      break;
+
     }
   }
 
@@ -458,6 +500,9 @@ public class TScan implements org.apache
     case FILTER_STRING:
       return getFilterString();
 
+    case BATCH_SIZE:
+      return Integer.valueOf(getBatchSize());
+
     }
     throw new IllegalStateException();
   }
@@ -481,6 +526,8 @@ public class TScan implements org.apache
       return isSetCaching();
     case FILTER_STRING:
       return isSetFilterString();
+    case BATCH_SIZE:
+      return isSetBatchSize();
     }
     throw new IllegalStateException();
   }
@@ -552,6 +599,15 @@ public class TScan implements org.apache
         return false;
     }
 
+    boolean this_present_batchSize = true && this.isSetBatchSize();
+    boolean that_present_batchSize = true && that.isSetBatchSize();
+    if (this_present_batchSize || that_present_batchSize) {
+      if (!(this_present_batchSize && that_present_batchSize))
+        return false;
+      if (this.batchSize != that.batchSize)
+        return false;
+    }
+
     return true;
   }
 
@@ -628,6 +684,16 @@ public class TScan implements org.apache
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetBatchSize()).compareTo(typedOther.isSetBatchSize());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBatchSize()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.batchSize, typedOther.batchSize);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -699,6 +765,12 @@ public class TScan implements org.apache
       }
       first = false;
     }
+    if (isSetBatchSize()) {
+      if (!first) sb.append(", ");
+      sb.append("batchSize:");
+      sb.append(this.batchSize);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -802,6 +874,14 @@ public class TScan implements org.apache
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 7: // BATCH_SIZE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.batchSize = iprot.readI32();
+              struct.setBatchSizeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -862,6 +942,11 @@ public class TScan implements org.apache
           oprot.writeFieldEnd();
         }
       }
+      if (struct.isSetBatchSize()) {
+        oprot.writeFieldBegin(BATCH_SIZE_FIELD_DESC);
+        oprot.writeI32(struct.batchSize);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -898,7 +983,10 @@ public class TScan implements org.apache
       if (struct.isSetFilterString()) {
         optionals.set(5);
       }
-      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetBatchSize()) {
+        optionals.set(6);
+      }
+      oprot.writeBitSet(optionals, 7);
       if (struct.isSetStartRow()) {
         oprot.writeBinary(struct.startRow);
       }
@@ -923,12 +1011,15 @@ public class TScan implements org.apache
       if (struct.isSetFilterString()) {
         oprot.writeBinary(struct.filterString);
       }
+      if (struct.isSetBatchSize()) {
+        oprot.writeI32(struct.batchSize);
+      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(6);
+      BitSet incoming = iprot.readBitSet(7);
       if (incoming.get(0)) {
         struct.startRow = iprot.readBinary();
         struct.setStartRowIsSet(true);
@@ -962,6 +1053,10 @@ public class TScan implements org.apache
         struct.filterString = iprot.readBinary();
         struct.setFilterStringIsSet(true);
       }
+      if (incoming.get(6)) {
+        struct.batchSize = iprot.readI32();
+        struct.setBatchSizeIsSet(true);
+      }
     }
   }
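
Since batchSize is an optional field tracked in the isset bitfield, it only goes on the wire when it has actually been assigned; otherwise the isSetBatchSize() check in ThriftServerRunner never calls Scan#setBatch. A tiny illustrative fragment, not part of the generated code:

    TScan s = new TScan();
    // Freshly constructed: the field is unassigned and is not serialized.
    assert !s.isSetBatchSize();

    s.setBatchSize(50);
    assert s.isSetBatchSize();   // now written to the wire as field id 7

    s.unsetBatchSize();
    assert !s.isSetBatchSize();  // absent again; the server keeps its default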
 

Modified: hbase/branches/0.95/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift?rev=1468644&r1=1468643&r2=1468644&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift (original)
+++ hbase/branches/0.95/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift Tue Apr 16 22:21:46 2013
@@ -138,7 +138,8 @@ struct TScan {
   3:optional i64 timestamp,
   4:optional list<Text> columns,
   5:optional i32 caching,
-  6:optional Text filterString
+  6:optional Text filterString,
+  7:optional i32 batchSize
 }
 
 //
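
Because the new field is optional and uses a fresh id (7), existing clients that never set batchSize remain wire-compatible: isSetBatchSize() is simply false on the server and Scan#setBatch is never invoked. Bindings for other languages can pick the field up by regenerating from this IDL with the Thrift compiler, for example (compiler version and output layout depend on the environment):

    thrift --gen py Hbase.thrift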