Posted to commits@cassandra.apache.org by ja...@apache.org on 2014/05/28 19:58:55 UTC

[1/6] Backport MultiSliceRequest

Repository: cassandra
Updated Branches:
  refs/heads/trunk d151b402d -> 235bae7b3


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java
----------------------------------------------------------------------
diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java
new file mode 100644
index 0000000..9d4878c
--- /dev/null
+++ b/interface/thrift/gen-java/org/apache/cassandra/thrift/MultiSliceRequest.java
@@ -0,0 +1,1042 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.1)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.cassandra.thrift;
+/*
+ * 
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * 
+ */
+
+
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Used to perform multiple slices on a single row key in one RPC operation
+ * @param key. The row key to be multi-sliced
+ * @param column_parent. The column family (super columns are unsupported)
+ * @param column_slices. 0 to many ColumnSlice objects, each of which selects columns
+ * @param reversed. Direction of the slice
+ * @param count. Maximum number of columns to return
+ * @param consistency_level. The consistency level to perform the operation at
+ */
+public class MultiSliceRequest implements org.apache.thrift.TBase<MultiSliceRequest, MultiSliceRequest._Fields>, java.io.Serializable, Cloneable, Comparable<MultiSliceRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MultiSliceRequest");
+
+  private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField COLUMN_PARENT_FIELD_DESC = new org.apache.thrift.protocol.TField("column_parent", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField COLUMN_SLICES_FIELD_DESC = new org.apache.thrift.protocol.TField("column_slices", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)4);
+  private static final org.apache.thrift.protocol.TField COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("count", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField CONSISTENCY_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency_level", org.apache.thrift.protocol.TType.I32, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new MultiSliceRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new MultiSliceRequestTupleSchemeFactory());
+  }
+
+  public ByteBuffer key; // optional
+  public ColumnParent column_parent; // optional
+  public List<ColumnSlice> column_slices; // optional
+  public boolean reversed; // optional
+  public int count; // optional
+  /**
+   * 
+   * @see ConsistencyLevel
+   */
+  public ConsistencyLevel consistency_level; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    KEY((short)1, "key"),
+    COLUMN_PARENT((short)2, "column_parent"),
+    COLUMN_SLICES((short)3, "column_slices"),
+    REVERSED((short)4, "reversed"),
+    COUNT((short)5, "count"),
+    /**
+     * 
+     * @see ConsistencyLevel
+     */
+    CONSISTENCY_LEVEL((short)6, "consistency_level");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // KEY
+          return KEY;
+        case 2: // COLUMN_PARENT
+          return COLUMN_PARENT;
+        case 3: // COLUMN_SLICES
+          return COLUMN_SLICES;
+        case 4: // REVERSED
+          return REVERSED;
+        case 5: // COUNT
+          return COUNT;
+        case 6: // CONSISTENCY_LEVEL
+          return CONSISTENCY_LEVEL;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __REVERSED_ISSET_ID = 0;
+  private static final int __COUNT_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private _Fields optionals[] = {_Fields.KEY,_Fields.COLUMN_PARENT,_Fields.COLUMN_SLICES,_Fields.REVERSED,_Fields.COUNT,_Fields.CONSISTENCY_LEVEL};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    tmpMap.put(_Fields.COLUMN_PARENT, new org.apache.thrift.meta_data.FieldMetaData("column_parent", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnParent.class)));
+    tmpMap.put(_Fields.COLUMN_SLICES, new org.apache.thrift.meta_data.FieldMetaData("column_slices", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnSlice.class))));
+    tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.COUNT, new org.apache.thrift.meta_data.FieldMetaData("count", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.CONSISTENCY_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("consistency_level", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ConsistencyLevel.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MultiSliceRequest.class, metaDataMap);
+  }
+
+  public MultiSliceRequest() {
+    this.reversed = false;
+
+    this.count = 1000;
+
+    this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE;
+
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public MultiSliceRequest(MultiSliceRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetKey()) {
+      this.key = org.apache.thrift.TBaseHelper.copyBinary(other.key);
+;
+    }
+    if (other.isSetColumn_parent()) {
+      this.column_parent = new ColumnParent(other.column_parent);
+    }
+    if (other.isSetColumn_slices()) {
+      List<ColumnSlice> __this__column_slices = new ArrayList<ColumnSlice>(other.column_slices.size());
+      for (ColumnSlice other_element : other.column_slices) {
+        __this__column_slices.add(new ColumnSlice(other_element));
+      }
+      this.column_slices = __this__column_slices;
+    }
+    this.reversed = other.reversed;
+    this.count = other.count;
+    if (other.isSetConsistency_level()) {
+      this.consistency_level = other.consistency_level;
+    }
+  }
+
+  public MultiSliceRequest deepCopy() {
+    return new MultiSliceRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.key = null;
+    this.column_parent = null;
+    this.column_slices = null;
+    this.reversed = false;
+
+    this.count = 1000;
+
+    this.consistency_level = org.apache.cassandra.thrift.ConsistencyLevel.ONE;
+
+  }
+
+  public byte[] getKey() {
+    setKey(org.apache.thrift.TBaseHelper.rightSize(key));
+    return key == null ? null : key.array();
+  }
+
+  public ByteBuffer bufferForKey() {
+    return key;
+  }
+
+  public MultiSliceRequest setKey(byte[] key) {
+    setKey(key == null ? (ByteBuffer)null : ByteBuffer.wrap(key));
+    return this;
+  }
+
+  public MultiSliceRequest setKey(ByteBuffer key) {
+    this.key = key;
+    return this;
+  }
+
+  public void unsetKey() {
+    this.key = null;
+  }
+
+  /** Returns true if field key is set (has been assigned a value) and false otherwise */
+  public boolean isSetKey() {
+    return this.key != null;
+  }
+
+  public void setKeyIsSet(boolean value) {
+    if (!value) {
+      this.key = null;
+    }
+  }
+
+  public ColumnParent getColumn_parent() {
+    return this.column_parent;
+  }
+
+  public MultiSliceRequest setColumn_parent(ColumnParent column_parent) {
+    this.column_parent = column_parent;
+    return this;
+  }
+
+  public void unsetColumn_parent() {
+    this.column_parent = null;
+  }
+
+  /** Returns true if field column_parent is set (has been assigned a value) and false otherwise */
+  public boolean isSetColumn_parent() {
+    return this.column_parent != null;
+  }
+
+  public void setColumn_parentIsSet(boolean value) {
+    if (!value) {
+      this.column_parent = null;
+    }
+  }
+
+  public int getColumn_slicesSize() {
+    return (this.column_slices == null) ? 0 : this.column_slices.size();
+  }
+
+  public java.util.Iterator<ColumnSlice> getColumn_slicesIterator() {
+    return (this.column_slices == null) ? null : this.column_slices.iterator();
+  }
+
+  public void addToColumn_slices(ColumnSlice elem) {
+    if (this.column_slices == null) {
+      this.column_slices = new ArrayList<ColumnSlice>();
+    }
+    this.column_slices.add(elem);
+  }
+
+  public List<ColumnSlice> getColumn_slices() {
+    return this.column_slices;
+  }
+
+  public MultiSliceRequest setColumn_slices(List<ColumnSlice> column_slices) {
+    this.column_slices = column_slices;
+    return this;
+  }
+
+  public void unsetColumn_slices() {
+    this.column_slices = null;
+  }
+
+  /** Returns true if field column_slices is set (has been assigned a value) and false otherwise */
+  public boolean isSetColumn_slices() {
+    return this.column_slices != null;
+  }
+
+  public void setColumn_slicesIsSet(boolean value) {
+    if (!value) {
+      this.column_slices = null;
+    }
+  }
+
+  public boolean isReversed() {
+    return this.reversed;
+  }
+
+  public MultiSliceRequest setReversed(boolean reversed) {
+    this.reversed = reversed;
+    setReversedIsSet(true);
+    return this;
+  }
+
+  public void unsetReversed() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REVERSED_ISSET_ID);
+  }
+
+  /** Returns true if field reversed is set (has been assigned a value) and false otherwise */
+  public boolean isSetReversed() {
+    return EncodingUtils.testBit(__isset_bitfield, __REVERSED_ISSET_ID);
+  }
+
+  public void setReversedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value);
+  }
+
+  public int getCount() {
+    return this.count;
+  }
+
+  public MultiSliceRequest setCount(int count) {
+    this.count = count;
+    setCountIsSet(true);
+    return this;
+  }
+
+  public void unsetCount() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COUNT_ISSET_ID);
+  }
+
+  /** Returns true if field count is set (has been assigned a value) and false otherwise */
+  public boolean isSetCount() {
+    return EncodingUtils.testBit(__isset_bitfield, __COUNT_ISSET_ID);
+  }
+
+  public void setCountIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COUNT_ISSET_ID, value);
+  }
+
+  /**
+   * 
+   * @see ConsistencyLevel
+   */
+  public ConsistencyLevel getConsistency_level() {
+    return this.consistency_level;
+  }
+
+  /**
+   * 
+   * @see ConsistencyLevel
+   */
+  public MultiSliceRequest setConsistency_level(ConsistencyLevel consistency_level) {
+    this.consistency_level = consistency_level;
+    return this;
+  }
+
+  public void unsetConsistency_level() {
+    this.consistency_level = null;
+  }
+
+  /** Returns true if field consistency_level is set (has been assigned a value) and false otherwise */
+  public boolean isSetConsistency_level() {
+    return this.consistency_level != null;
+  }
+
+  public void setConsistency_levelIsSet(boolean value) {
+    if (!value) {
+      this.consistency_level = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case KEY:
+      if (value == null) {
+        unsetKey();
+      } else {
+        setKey((ByteBuffer)value);
+      }
+      break;
+
+    case COLUMN_PARENT:
+      if (value == null) {
+        unsetColumn_parent();
+      } else {
+        setColumn_parent((ColumnParent)value);
+      }
+      break;
+
+    case COLUMN_SLICES:
+      if (value == null) {
+        unsetColumn_slices();
+      } else {
+        setColumn_slices((List<ColumnSlice>)value);
+      }
+      break;
+
+    case REVERSED:
+      if (value == null) {
+        unsetReversed();
+      } else {
+        setReversed((Boolean)value);
+      }
+      break;
+
+    case COUNT:
+      if (value == null) {
+        unsetCount();
+      } else {
+        setCount((Integer)value);
+      }
+      break;
+
+    case CONSISTENCY_LEVEL:
+      if (value == null) {
+        unsetConsistency_level();
+      } else {
+        setConsistency_level((ConsistencyLevel)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case KEY:
+      return getKey();
+
+    case COLUMN_PARENT:
+      return getColumn_parent();
+
+    case COLUMN_SLICES:
+      return getColumn_slices();
+
+    case REVERSED:
+      return Boolean.valueOf(isReversed());
+
+    case COUNT:
+      return Integer.valueOf(getCount());
+
+    case CONSISTENCY_LEVEL:
+      return getConsistency_level();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case KEY:
+      return isSetKey();
+    case COLUMN_PARENT:
+      return isSetColumn_parent();
+    case COLUMN_SLICES:
+      return isSetColumn_slices();
+    case REVERSED:
+      return isSetReversed();
+    case COUNT:
+      return isSetCount();
+    case CONSISTENCY_LEVEL:
+      return isSetConsistency_level();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof MultiSliceRequest)
+      return this.equals((MultiSliceRequest)that);
+    return false;
+  }
+
+  public boolean equals(MultiSliceRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_key = true && this.isSetKey();
+    boolean that_present_key = true && that.isSetKey();
+    if (this_present_key || that_present_key) {
+      if (!(this_present_key && that_present_key))
+        return false;
+      if (!this.key.equals(that.key))
+        return false;
+    }
+
+    boolean this_present_column_parent = true && this.isSetColumn_parent();
+    boolean that_present_column_parent = true && that.isSetColumn_parent();
+    if (this_present_column_parent || that_present_column_parent) {
+      if (!(this_present_column_parent && that_present_column_parent))
+        return false;
+      if (!this.column_parent.equals(that.column_parent))
+        return false;
+    }
+
+    boolean this_present_column_slices = true && this.isSetColumn_slices();
+    boolean that_present_column_slices = true && that.isSetColumn_slices();
+    if (this_present_column_slices || that_present_column_slices) {
+      if (!(this_present_column_slices && that_present_column_slices))
+        return false;
+      if (!this.column_slices.equals(that.column_slices))
+        return false;
+    }
+
+    boolean this_present_reversed = true && this.isSetReversed();
+    boolean that_present_reversed = true && that.isSetReversed();
+    if (this_present_reversed || that_present_reversed) {
+      if (!(this_present_reversed && that_present_reversed))
+        return false;
+      if (this.reversed != that.reversed)
+        return false;
+    }
+
+    boolean this_present_count = true && this.isSetCount();
+    boolean that_present_count = true && that.isSetCount();
+    if (this_present_count || that_present_count) {
+      if (!(this_present_count && that_present_count))
+        return false;
+      if (this.count != that.count)
+        return false;
+    }
+
+    boolean this_present_consistency_level = true && this.isSetConsistency_level();
+    boolean that_present_consistency_level = true && that.isSetConsistency_level();
+    if (this_present_consistency_level || that_present_consistency_level) {
+      if (!(this_present_consistency_level && that_present_consistency_level))
+        return false;
+      if (!this.consistency_level.equals(that.consistency_level))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_key = true && (isSetKey());
+    builder.append(present_key);
+    if (present_key)
+      builder.append(key);
+
+    boolean present_column_parent = true && (isSetColumn_parent());
+    builder.append(present_column_parent);
+    if (present_column_parent)
+      builder.append(column_parent);
+
+    boolean present_column_slices = true && (isSetColumn_slices());
+    builder.append(present_column_slices);
+    if (present_column_slices)
+      builder.append(column_slices);
+
+    boolean present_reversed = true && (isSetReversed());
+    builder.append(present_reversed);
+    if (present_reversed)
+      builder.append(reversed);
+
+    boolean present_count = true && (isSetCount());
+    builder.append(present_count);
+    if (present_count)
+      builder.append(count);
+
+    boolean present_consistency_level = true && (isSetConsistency_level());
+    builder.append(present_consistency_level);
+    if (present_consistency_level)
+      builder.append(consistency_level.getValue());
+
+    return builder.toHashCode();
+  }
+
+  @Override
+  public int compareTo(MultiSliceRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetKey()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColumn_parent()).compareTo(other.isSetColumn_parent());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColumn_parent()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_parent, other.column_parent);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColumn_slices()).compareTo(other.isSetColumn_slices());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColumn_slices()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_slices, other.column_slices);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetReversed()).compareTo(other.isSetReversed());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetReversed()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.reversed, other.reversed);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCount()).compareTo(other.isSetCount());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCount()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.count, other.count);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetConsistency_level()).compareTo(other.isSetConsistency_level());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetConsistency_level()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.consistency_level, other.consistency_level);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("MultiSliceRequest(");
+    boolean first = true;
+
+    if (isSetKey()) {
+      sb.append("key:");
+      if (this.key == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.key, sb);
+      }
+      first = false;
+    }
+    if (isSetColumn_parent()) {
+      if (!first) sb.append(", ");
+      sb.append("column_parent:");
+      if (this.column_parent == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.column_parent);
+      }
+      first = false;
+    }
+    if (isSetColumn_slices()) {
+      if (!first) sb.append(", ");
+      sb.append("column_slices:");
+      if (this.column_slices == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.column_slices);
+      }
+      first = false;
+    }
+    if (isSetReversed()) {
+      if (!first) sb.append(", ");
+      sb.append("reversed:");
+      sb.append(this.reversed);
+      first = false;
+    }
+    if (isSetCount()) {
+      if (!first) sb.append(", ");
+      sb.append("count:");
+      sb.append(this.count);
+      first = false;
+    }
+    if (isSetConsistency_level()) {
+      if (!first) sb.append(", ");
+      sb.append("consistency_level:");
+      if (this.consistency_level == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.consistency_level);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (column_parent != null) {
+      column_parent.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class MultiSliceRequestStandardSchemeFactory implements SchemeFactory {
+    public MultiSliceRequestStandardScheme getScheme() {
+      return new MultiSliceRequestStandardScheme();
+    }
+  }
+
+  private static class MultiSliceRequestStandardScheme extends StandardScheme<MultiSliceRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MultiSliceRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // KEY
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.key = iprot.readBinary();
+              struct.setKeyIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // COLUMN_PARENT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.column_parent = new ColumnParent();
+              struct.column_parent.read(iprot);
+              struct.setColumn_parentIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // COLUMN_SLICES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list216 = iprot.readListBegin();
+                struct.column_slices = new ArrayList<ColumnSlice>(_list216.size);
+                for (int _i217 = 0; _i217 < _list216.size; ++_i217)
+                {
+                  ColumnSlice _elem218;
+                  _elem218 = new ColumnSlice();
+                  _elem218.read(iprot);
+                  struct.column_slices.add(_elem218);
+                }
+                iprot.readListEnd();
+              }
+              struct.setColumn_slicesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // REVERSED
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.reversed = iprot.readBool();
+              struct.setReversedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // COUNT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.count = iprot.readI32();
+              struct.setCountIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CONSISTENCY_LEVEL
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32());
+              struct.setConsistency_levelIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+
+      // check for required fields of primitive type, which can't be checked in the validate method
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MultiSliceRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.key != null) {
+        if (struct.isSetKey()) {
+          oprot.writeFieldBegin(KEY_FIELD_DESC);
+          oprot.writeBinary(struct.key);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.column_parent != null) {
+        if (struct.isSetColumn_parent()) {
+          oprot.writeFieldBegin(COLUMN_PARENT_FIELD_DESC);
+          struct.column_parent.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.column_slices != null) {
+        if (struct.isSetColumn_slices()) {
+          oprot.writeFieldBegin(COLUMN_SLICES_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.column_slices.size()));
+            for (ColumnSlice _iter219 : struct.column_slices)
+            {
+              _iter219.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetReversed()) {
+        oprot.writeFieldBegin(REVERSED_FIELD_DESC);
+        oprot.writeBool(struct.reversed);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetCount()) {
+        oprot.writeFieldBegin(COUNT_FIELD_DESC);
+        oprot.writeI32(struct.count);
+        oprot.writeFieldEnd();
+      }
+      if (struct.consistency_level != null) {
+        if (struct.isSetConsistency_level()) {
+          oprot.writeFieldBegin(CONSISTENCY_LEVEL_FIELD_DESC);
+          oprot.writeI32(struct.consistency_level.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class MultiSliceRequestTupleSchemeFactory implements SchemeFactory {
+    public MultiSliceRequestTupleScheme getScheme() {
+      return new MultiSliceRequestTupleScheme();
+    }
+  }
+
+  private static class MultiSliceRequestTupleScheme extends TupleScheme<MultiSliceRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, MultiSliceRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetKey()) {
+        optionals.set(0);
+      }
+      if (struct.isSetColumn_parent()) {
+        optionals.set(1);
+      }
+      if (struct.isSetColumn_slices()) {
+        optionals.set(2);
+      }
+      if (struct.isSetReversed()) {
+        optionals.set(3);
+      }
+      if (struct.isSetCount()) {
+        optionals.set(4);
+      }
+      if (struct.isSetConsistency_level()) {
+        optionals.set(5);
+      }
+      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetKey()) {
+        oprot.writeBinary(struct.key);
+      }
+      if (struct.isSetColumn_parent()) {
+        struct.column_parent.write(oprot);
+      }
+      if (struct.isSetColumn_slices()) {
+        {
+          oprot.writeI32(struct.column_slices.size());
+          for (ColumnSlice _iter220 : struct.column_slices)
+          {
+            _iter220.write(oprot);
+          }
+        }
+      }
+      if (struct.isSetReversed()) {
+        oprot.writeBool(struct.reversed);
+      }
+      if (struct.isSetCount()) {
+        oprot.writeI32(struct.count);
+      }
+      if (struct.isSetConsistency_level()) {
+        oprot.writeI32(struct.consistency_level.getValue());
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, MultiSliceRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(6);
+      if (incoming.get(0)) {
+        struct.key = iprot.readBinary();
+        struct.setKeyIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.column_parent = new ColumnParent();
+        struct.column_parent.read(iprot);
+        struct.setColumn_parentIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TList _list221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.column_slices = new ArrayList<ColumnSlice>(_list221.size);
+          for (int _i222 = 0; _i222 < _list221.size; ++_i222)
+          {
+            ColumnSlice _elem223;
+            _elem223 = new ColumnSlice();
+            _elem223.read(iprot);
+            struct.column_slices.add(_elem223);
+          }
+        }
+        struct.setColumn_slicesIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.reversed = iprot.readBool();
+        struct.setReversedIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.count = iprot.readI32();
+        struct.setCountIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32());
+        struct.setConsistency_levelIsSet(true);
+      }
+    }
+  }
+
+}
+
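
As a rough illustration (not part of the commit itself), client code could populate the generated struct above along the following lines; the row key, the column family name "Standard1", and the slice bounds are placeholder values chosen to mirror the unit tests further down:

    import java.util.Arrays;
    import org.apache.cassandra.thrift.ColumnParent;
    import org.apache.cassandra.thrift.ColumnSlice;
    import org.apache.cassandra.thrift.MultiSliceRequest;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class MultiSliceRequestExample
    {
        // Builds a request that fetches two disjoint slices of one row in a single call.
        static MultiSliceRequest twoSliceRequest()
        {
            MultiSliceRequest request = new MultiSliceRequest();
            request.setKey(ByteBufferUtil.bytes("some_row_key"));      // placeholder row key
            request.setColumn_parent(new ColumnParent("Standard1"));   // placeholder column family
            request.setReversed(false);
            request.setCount(1000);                                     // cap on returned columns

            ColumnSlice first = new ColumnSlice();                      // columns "a" .. "e" inclusive
            first.setStart(ByteBufferUtil.bytes("a"));
            first.setFinish(ByteBufferUtil.bytes("e"));
            ColumnSlice second = new ColumnSlice();                     // columns "i" .. "n" inclusive
            second.setStart(ByteBufferUtil.bytes("i"));
            second.setFinish(ByteBufferUtil.bytes("n"));
            request.setColumn_slices(Arrays.asList(first, second));
            return request;
        }
    }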

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java
index 5a296f3..917e4cb 100644
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@ -2036,6 +2036,74 @@ public class CassandraServer implements Cassandra.Iface
         }
     }
 
+    @Override
+    public List<ColumnOrSuperColumn> get_multi_slice(MultiSliceRequest request)
+            throws InvalidRequestException, UnavailableException, TimedOutException
+    {
+        if (startSessionIfRequested())
+        {
+            Map<String, String> traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(request.key),
+                                                                  "column_parent", request.column_parent.toString(),
+                                                                  "consistency_level", request.consistency_level.name(),
+                                                                  "count", String.valueOf(request.count),
+                                                                  "column_slices", request.column_slices.toString());
+            Tracing.instance.begin("get_multi_slice", traceParameters);
+        }
+        else
+        {
+            logger.debug("get_multi_slice");
+        }
+        try
+        {
+            ClientState cState = state();
+            String keyspace = cState.getKeyspace();
+            state().hasColumnFamilyAccess(keyspace, request.getColumn_parent().column_family, Permission.SELECT);
+            CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace, request.getColumn_parent().column_family);
+            if (metadata.cfType == ColumnFamilyType.Super)
+                throw new org.apache.cassandra.exceptions.InvalidRequestException("get_multi_slice does not support super columns");
+            ThriftValidation.validateColumnParent(metadata, request.getColumn_parent());
+            org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(request.getConsistency_level());
+            consistencyLevel.validateForRead(keyspace);
+            List<ReadCommand> commands = new ArrayList<>(1);
+            ColumnSlice [] slices = new ColumnSlice[request.getColumn_slices().size()];
+            for (int i = 0 ; i < request.getColumn_slices().size() ; i++)
+            {
+                fixOptionalSliceParameters(request.getColumn_slices().get(i));
+                Composite start = metadata.comparator.fromByteBuffer(request.getColumn_slices().get(i).start);
+                Composite finish = metadata.comparator.fromByteBuffer(request.getColumn_slices().get(i).finish);
+                int compare = metadata.comparator.compare(start, finish);
+                if (!request.reversed && compare > 0)
+                    throw new InvalidRequestException(String.format("Column slice at index %d had start greater than finish", i));
+                else if (request.reversed && compare < 0)
+                    throw new InvalidRequestException(String.format("Reversed column slice at index %d had start less than finish", i));
+                slices[i] = new ColumnSlice(start, finish);
+            }
+            SliceQueryFilter filter = new SliceQueryFilter(slices, request.reversed, request.count);
+            ThriftValidation.validateKey(metadata, request.key);
+            commands.add(ReadCommand.create(keyspace, request.key, request.column_parent.getColumn_family(), System.currentTimeMillis(), filter));
+            return getSlice(commands, request.column_parent.isSetSuper_column(), consistencyLevel).entrySet().iterator().next().getValue();
+        }
+        catch (RequestValidationException e)
+        {
+            throw ThriftConversion.toThrift(e);
+        }
+        finally
+        {
+            Tracing.instance.stopSession();
+        }
+    }
+
+    /**
+     * Sets start and finish to the empty value "" (start-of-row / end-of-row) when they are not specified.
+     * @param columnSlice the slice whose optional bounds are filled in
+     */
+    private static void fixOptionalSliceParameters(org.apache.cassandra.thrift.ColumnSlice columnSlice) {
+        if (!columnSlice.isSetStart())
+            columnSlice.setStart(new byte[0]);
+        if (!columnSlice.isSetFinish())
+            columnSlice.setFinish(new byte[0]);
+    }
+
     public CqlResult execute_prepared_cql_query(int itemId, List<ByteBuffer> bindVariables)
     throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException
     {
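
A note on the checks in get_multi_slice above (illustrative, not part of the patch): with reversed = false each slice must have start <= finish in comparator order, with reversed = true the bounds must be given in descending order, and a slice with neither bound set covers the whole row because fixOptionalSliceParameters() substitutes empty values:

    // Ascending slice, valid when reversed == false: "a" sorts before "e".
    ColumnSlice ascending = new ColumnSlice();
    ascending.setStart(ByteBufferUtil.bytes("a"));
    ascending.setFinish(ByteBufferUtil.bytes("e"));

    // The same range for reversed == true must be expressed with descending bounds.
    ColumnSlice descending = new ColumnSlice();
    descending.setStart(ByteBufferUtil.bytes("e"));
    descending.setFinish(ByteBufferUtil.bytes("a"));

    // No bounds set: fixOptionalSliceParameters() fills in ""/"", so the slice
    // selects from the start of the row to the end of the row.
    ColumnSlice wholeRow = new ColumnSlice();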

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java b/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java
new file mode 100644
index 0000000..d1c913b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java
@@ -0,0 +1,149 @@
+package org.apache.cassandra.thrift;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.service.EmbeddedCassandraService;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.thrift.TException;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class MultiSliceTest extends SchemaLoader
+{
+    private static CassandraServer server;
+    
+    @BeforeClass
+    public static void setup() throws IOException, TException 
+    {
+        Schema.instance.clear(); // Schema is now written to disk and will be reloaded
+        new EmbeddedCassandraService().start();
+        ThriftSessionManager.instance.setCurrentSocket(new InetSocketAddress(9160));        
+        server = new CassandraServer();
+        server.set_keyspace("Keyspace1");
+    }
+
+    private static MultiSliceRequest makeMultiSliceRequest(ByteBuffer key)
+    {
+        ColumnParent cp = new ColumnParent("Standard1");
+        MultiSliceRequest req = new MultiSliceRequest();
+        req.setKey(key);
+        req.setCount(1000);
+        req.reversed = false;
+        req.setColumn_parent(cp);
+        return req;
+    }
+    
+    @Test
+    public void test_multi_slice_optional_column_slice() throws TException
+    {
+        ColumnParent cp = new ColumnParent("Standard1");
+        ByteBuffer key = ByteBuffer.wrap("multi_slice".getBytes());
+        List<String> expected = new ArrayList<String>();
+        for (char a = 'a'; a <= 'z'; a++)
+            expected.add(a + "");
+
+        addTheAlphabetToRow(key, cp);
+        MultiSliceRequest req = makeMultiSliceRequest(key);
+        req.setColumn_slices(new ArrayList<ColumnSlice>());
+        req.getColumn_slices().add(new ColumnSlice());
+        List<ColumnOrSuperColumn> list = server.get_multi_slice(req);
+        assertColumnNameMatches(expected, list);
+    }
+    
+    @Test
+    public void test_multi_slice() throws TException
+    {
+        ColumnParent cp = new ColumnParent("Standard1");
+        ByteBuffer key = ByteBuffer.wrap("multi_slice_two_slice".getBytes());
+        addTheAlphabetToRow(key, cp);
+        MultiSliceRequest req = makeMultiSliceRequest(key);
+        req.setColumn_slices(Arrays.asList(columnSliceFrom("a", "e"), columnSliceFrom("i", "n")));
+        assertColumnNameMatches(Arrays.asList("a", "b", "c", "d", "e", "i", "j", "k" , "l", "m" , "n"), server.get_multi_slice(req));
+    }
+    
+    @Test
+    public void test_with_overlap() throws TException
+    {
+        ColumnParent cp = new ColumnParent("Standard1");
+        ByteBuffer key = ByteBuffer.wrap("overlap".getBytes());
+        addTheAlphabetToRow(key, cp);
+        MultiSliceRequest req = makeMultiSliceRequest(key);
+        req.setColumn_slices(Arrays.asList(columnSliceFrom("a", "e"), columnSliceFrom("d", "g")));
+        assertColumnNameMatches(Arrays.asList("a", "b", "c", "d", "e", "f", "g"), server.get_multi_slice(req));
+    }
+    
+    @Test
+    public void test_with_overlap_reversed() throws TException
+    {
+        ColumnParent cp = new ColumnParent("Standard1");
+        ByteBuffer key = ByteBuffer.wrap("overlap_reversed".getBytes());
+        addTheAlphabetToRow(key, cp);
+        MultiSliceRequest req = makeMultiSliceRequest(key);
+        req.reversed = true;
+        req.setColumn_slices(Arrays.asList(columnSliceFrom("e", "a"), columnSliceFrom("g", "d")));
+        assertColumnNameMatches(Arrays.asList("g", "f", "e", "d", "c", "b", "a"), server.get_multi_slice(req));
+    }
+
+    @Test(expected=InvalidRequestException.class)
+    public void test_that_column_slice_is_proper() throws TException
+    {
+      ByteBuffer key = ByteBuffer.wrap("overlap".getBytes());
+      MultiSliceRequest req = makeMultiSliceRequest(key);
+      req.reversed = true;
+      req.setColumn_slices(Arrays.asList(columnSliceFrom("a", "e"), columnSliceFrom("g", "d")));
+      assertColumnNameMatches(Arrays.asList("a", "b", "c", "d", "e", "f", "g"), server.get_multi_slice(req));
+    }
+    
+    @Test
+    public void test_with_overlap_reversed_with_count() throws TException
+    {
+        ColumnParent cp = new ColumnParent("Standard1");
+        ByteBuffer key = ByteBuffer.wrap("overlap_reversed_count".getBytes());
+        addTheAlphabetToRow(key, cp);
+        MultiSliceRequest req = makeMultiSliceRequest(key);
+        req.setCount(6);
+        req.reversed = true;
+        req.setColumn_slices(Arrays.asList(columnSliceFrom("e", "a"), columnSliceFrom("g", "d")));
+        assertColumnNameMatches(Arrays.asList("g", "e", "d", "c", "b", "a"), server.get_multi_slice(req)); 
+    }
+    
+    private static void addTheAlphabetToRow(ByteBuffer key, ColumnParent parent) 
+            throws InvalidRequestException, UnavailableException, TimedOutException
+    {
+        for (char a = 'a'; a <= 'z'; a++) {
+            Column c1 = new Column();
+            c1.setName(ByteBufferUtil.bytes(String.valueOf(a)));
+            c1.setValue(new byte [0]);
+            c1.setTimestamp(System.nanoTime());
+            server.insert(key, parent, c1, ConsistencyLevel.ONE); 
+         }
+    }
+    
+    private static void assertColumnNameMatches(List<String> expected , List<ColumnOrSuperColumn> actual)
+    {
+        Assert.assertEquals(actual+" "+expected +" did not have same number of elements", actual.size(), expected.size());
+        for (int i = 0 ; i< expected.size() ; i++)
+        {
+            Assert.assertEquals(actual.get(i) +" did not equal "+ expected.get(i), 
+                    new String(actual.get(i).getColumn().getName()), expected.get(i));
+        }
+    }
+    
+    private ColumnSlice columnSliceFrom(String startInclusive, String endInclusive)
+    {
+        ColumnSlice cs = new ColumnSlice();
+        cs.setStart(ByteBufferUtil.bytes(startInclusive));
+        cs.setFinish(ByteBufferUtil.bytes(endInclusive));
+        return cs;
+    }
+}
\ No newline at end of file


[4/6] git commit: Backport MultiSliceRequest

Posted by ja...@apache.org.
Backport MultiSliceRequest

Patch by Ed Capriolo; Reviewed by tjake for CASSANDRA-7027


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7f63b1f9
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7f63b1f9
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7f63b1f9

Branch: refs/heads/trunk
Commit: 7f63b1f958f0f502f04d24f8d820d29bd484786d
Parents: 4a295b6
Author: Jake Luciani <ja...@apache.org>
Authored: Tue May 27 11:27:17 2014 -0400
Committer: Jake Luciani <ja...@apache.org>
Committed: Wed May 28 12:50:35 2014 -0400

----------------------------------------------------------------------
 CHANGES.txt                                     |    1 +
 interface/cassandra.thrift                      |   35 +-
 .../org/apache/cassandra/thrift/Cassandra.java  | 3071 +++++++++++++-----
 .../org/apache/cassandra/thrift/CfDef.java      |  298 +-
 .../apache/cassandra/thrift/ColumnSlice.java    |  551 ++++
 .../org/apache/cassandra/thrift/CqlRow.java     |    7 +-
 .../cassandra/thrift/MultiSliceRequest.java     | 1042 ++++++
 .../cassandra/thrift/CassandraServer.java       |   68 +
 .../apache/cassandra/thrift/MultiSliceTest.java |  149 +
 9 files changed, 4174 insertions(+), 1048 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index a71867a..a7cc872 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -25,6 +25,7 @@
  * Fix broken paging state with prepared statement (CASSANDRA-7120)
  * Fix IllegalArgumentException in CqlStorage (CASSANDRA-7287)
 * Allow nulls/non-existent fields in UDT (CASSANDRA-7206)
+ * Backport Thrift MultiSliceRequest (CASSANDRA-7027)
 Merged from 2.0:
  * Copy compaction options to make sure they are reloaded (CASSANDRA-7290)
  * Add option to do more aggressive tombstone compactions (CASSANDRA-6563)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/interface/cassandra.thrift
----------------------------------------------------------------------
diff --git a/interface/cassandra.thrift b/interface/cassandra.thrift
index 4beb2eb..2484a5d 100644
--- a/interface/cassandra.thrift
+++ b/interface/cassandra.thrift
@@ -117,7 +117,6 @@ struct ColumnOrSuperColumn {
     4: optional CounterSuperColumn counter_super_column
 }
 
-
 #
 # Exceptions
 # (note that internal server errors will raise a TApplicationException, courtesy of Thrift)
@@ -571,6 +570,35 @@ struct CfSplit {
     3: required i64 row_count
 }
 
+/** The ColumnSlice is used to select a set of columns from inside a row.
+ * If start or finish is unspecified, it defaults to the start-of-row
+ * or end-of-row value respectively.
+ * @param start. The inclusive start of the ColumnSlice
+ * @param finish. The inclusive end of the ColumnSlice
+ */
+struct ColumnSlice {
+    1: optional binary start,
+    2: optional binary finish
+}
+
+/**
+ * Used to perform multiple slices on a single row key in one RPC operation
+ * @param key. The row key to be multi-sliced
+ * @param column_parent. The column family (super columns are unsupported)
+ * @param column_slices. 0 to many ColumnSlice objects, each of which selects columns
+ * @param reversed. Direction of the slice
+ * @param count. Maximum number of columns to return
+ * @param consistency_level. The consistency level to perform the operation at
+ */
+struct MultiSliceRequest {
+    1: optional binary key,
+    2: optional ColumnParent column_parent,
+    3: optional list<ColumnSlice> column_slices,
+    4: optional bool reversed=false,
+    5: optional i32 count=1000,
+    6: optional ConsistencyLevel consistency_level=ConsistencyLevel.ONE
+}
+
 service Cassandra {
   # auth methods
   void login(1: required AuthenticationRequest auth_request) throws (1:AuthenticationException authnx, 2:AuthorizationException authzx),
@@ -749,6 +777,11 @@ service Cassandra {
   void truncate(1:required string cfname)
        throws (1: InvalidRequestException ire, 2: UnavailableException ue, 3: TimedOutException te),
 
+  /**
+  * Select multiple slices of a key in a single RPC operation
+  */
+  list<ColumnOrSuperColumn> get_multi_slice(1:required MultiSliceRequest request)
+       throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
 
     
   // Meta-APIs -- APIs to get information about the node or cluster,
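
To show where the new service method is called from, here is a hedged sketch of invoking it through the generated blocking Thrift client; the endpoint, the keyspace name, and the request variable (assumed to be built as in the earlier example) are placeholder values:

    import java.util.List;
    import org.apache.cassandra.thrift.Cassandra;
    import org.apache.cassandra.thrift.ColumnOrSuperColumn;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TFramedTransport;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    // Open a framed transport to the Thrift rpc port and issue one multi-slice read.
    TTransport transport = new TFramedTransport(new TSocket("127.0.0.1", 9160));  // placeholder endpoint
    transport.open();
    Cassandra.Client client = new Cassandra.Client(new TBinaryProtocol(transport));
    client.set_keyspace("Keyspace1");                                             // placeholder keyspace
    List<ColumnOrSuperColumn> columns = client.get_multi_slice(request);          // request as built earlier
    transport.close();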


[6/6] git commit: Merge branch 'cassandra-2.1' into trunk

Posted by ja...@apache.org.
Merge branch 'cassandra-2.1' into trunk

Conflicts:
	interface/cassandra.thrift
	src/java/org/apache/cassandra/thrift/CassandraServer.java
	test/unit/org/apache/cassandra/thrift/MultiSliceTest.java


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/235bae7b
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/235bae7b
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/235bae7b

Branch: refs/heads/trunk
Commit: 235bae7b30154ad442510d448c8d826cdc287cb1
Parents: d151b40 e9e91d7
Author: Jake Luciani <ja...@apache.org>
Authored: Wed May 28 13:55:32 2014 -0400
Committer: Jake Luciani <ja...@apache.org>
Committed: Wed May 28 13:55:32 2014 -0400

----------------------------------------------------------------------
 CHANGES.txt                                     |   2 +
 .../org/apache/cassandra/thrift/CfDef.java      | 298 ++++++++++---------
 .../org/apache/cassandra/thrift/CqlRow.java     |   7 +-
 .../cql3/statements/CQL3CasConditions.java      |   1 +
 .../cql3/statements/SelectStatement.java        |   1 +
 .../cassandra/db/composites/AbstractCType.java  |   5 +
 .../apache/cassandra/db/filter/ColumnSlice.java |  97 ++++++
 .../cassandra/thrift/CassandraServer.java       |   5 +-
 .../cassandra/db/filter/ColumnSliceTest.java    | 137 ++++++++-
 .../apache/cassandra/thrift/MultiSliceTest.java |  20 +-
 10 files changed, 413 insertions(+), 160 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/235bae7b/CHANGES.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/235bae7b/src/java/org/apache/cassandra/cql3/statements/CQL3CasConditions.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/235bae7b/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/thrift/CassandraServer.java
index 4f54242,1a77ffa..868c701
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@@ -2044,7 -2078,9 +2044,8 @@@ public class CassandraServer implement
                      throw new InvalidRequestException(String.format("Reversed column slice at index %d had start less than finish", i));
                  slices[i] = new ColumnSlice(start, finish);
              }
-             SliceQueryFilter filter = new SliceQueryFilter(slices, request.reversed, request.count);
 -
+             ColumnSlice[] deoverlapped = ColumnSlice.deoverlapSlices(slices, request.reversed ? metadata.comparator.reverseComparator() : metadata.comparator);
+             SliceQueryFilter filter = new SliceQueryFilter(deoverlapped, request.reversed, request.count);
              ThriftValidation.validateKey(metadata, request.key);
              commands.add(ReadCommand.create(keyspace, request.key, request.column_parent.getColumn_family(), System.currentTimeMillis(), filter));
              return getSlice(commands, request.column_parent.isSetSuper_column(), consistencyLevel).entrySet().iterator().next().getValue();
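
For context on the conflict resolved above (not part of the diff itself): on trunk the user-supplied slices are first normalized by ColumnSlice.deoverlapSlices into sorted, non-overlapping ranges before the SliceQueryFilter is built, so overlapping requests such as the one in MultiSliceTest.test_with_overlap collapse into a single range:

    // Hypothetical input, reversed == false:
    //   slice 1: ["a", "e"], slice 2: ["d", "g"]
    // After deoverlapping the filter effectively sees one slice:
    //   ["a", "g"]
    // and each of the columns a..g is returned exactly once.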


[3/6] Backport MultiSliceRequest

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java
----------------------------------------------------------------------
diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java
index 15b99fa..55f4734 100644
--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java
+++ b/interface/thrift/gen-java/org/apache/cassandra/thrift/Cassandra.java
@@ -248,6 +248,13 @@ public class Cassandra {
     public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException;
 
     /**
+     * Select multiple slices of a key in a single RPC operation
+     * 
+     * @param request
+     */
+    public List<ColumnOrSuperColumn> get_multi_slice(MultiSliceRequest request) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException;
+
+    /**
      * for each schema version present in the cluster, returns a list of nodes at that version.
      * hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
      * the cluster is all on the same version if the size of the map is 1.
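
For reference, a minimal client-side sketch of how the new get_multi_slice call can be driven through the generated Thrift client. The host, port (9160), keyspace "ks", column family "cf", and column names are illustrative assumptions only:

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ColumnSlice;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.MultiSliceRequest;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;

public class MultiSliceExample
{
    public static void main(String[] args) throws Exception
    {
        // Illustrative host/port and schema names; adjust for a real cluster.
        TFramedTransport transport = new TFramedTransport(new TSocket("localhost", 9160));
        transport.open();
        Cassandra.Client client = new Cassandra.Client(new TBinaryProtocol(transport));
        client.set_keyspace("ks");

        // Two slices of the same row key, fetched in a single RPC round trip.
        ColumnSlice first = new ColumnSlice().setStart(bytes("a")).setFinish(bytes("c"));
        ColumnSlice second = new ColumnSlice().setStart(bytes("x")).setFinish(bytes("z"));

        MultiSliceRequest request = new MultiSliceRequest()
            .setKey(bytes("key1"))
            .setColumn_parent(new ColumnParent("cf"))
            .setColumn_slices(Arrays.asList(first, second))
            .setReversed(false)
            .setCount(100)
            .setConsistency_level(ConsistencyLevel.ONE);

        for (ColumnOrSuperColumn cosc : client.get_multi_slice(request))
            System.out.println(new String(cosc.getColumn().getName()));

        transport.close();
    }

    private static ByteBuffer bytes(String s)
    {
        return ByteBuffer.wrap(s.getBytes());
    }
}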
@@ -480,6 +487,8 @@ public class Cassandra {
 
     public void truncate(String cfname, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void get_multi_slice(MultiSliceRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void describe_schema_versions(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void describe_keyspaces(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1138,6 +1147,38 @@ public class Cassandra {
       return;
     }
 
+    public List<ColumnOrSuperColumn> get_multi_slice(MultiSliceRequest request) throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException
+    {
+      send_get_multi_slice(request);
+      return recv_get_multi_slice();
+    }
+
+    public void send_get_multi_slice(MultiSliceRequest request) throws org.apache.thrift.TException
+    {
+      get_multi_slice_args args = new get_multi_slice_args();
+      args.setRequest(request);
+      sendBase("get_multi_slice", args);
+    }
+
+    public List<ColumnOrSuperColumn> recv_get_multi_slice() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException
+    {
+      get_multi_slice_result result = new get_multi_slice_result();
+      receiveBase(result, "get_multi_slice");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.ire != null) {
+        throw result.ire;
+      }
+      if (result.ue != null) {
+        throw result.ue;
+      }
+      if (result.te != null) {
+        throw result.te;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_multi_slice failed: unknown result");
+    }
+
     public Map<String,List<String>> describe_schema_versions() throws InvalidRequestException, org.apache.thrift.TException
     {
       send_describe_schema_versions();
@@ -2576,6 +2617,38 @@ public class Cassandra {
       }
     }
 
+    public void get_multi_slice(MultiSliceRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_multi_slice_call method_call = new get_multi_slice_call(request, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_multi_slice_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private MultiSliceRequest request;
+      public get_multi_slice_call(MultiSliceRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.request = request;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_multi_slice", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_multi_slice_args args = new get_multi_slice_args();
+        args.setRequest(request);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<ColumnOrSuperColumn> getResult() throws InvalidRequestException, UnavailableException, TimedOutException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_multi_slice();
+      }
+    }
+
     public void describe_schema_versions(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       describe_schema_versions_call method_call = new describe_schema_versions_call(resultHandler, this, ___protocolFactory, ___transport);
@@ -3457,6 +3530,7 @@ public class Cassandra {
       processMap.put("batch_mutate", new batch_mutate());
       processMap.put("atomic_batch_mutate", new atomic_batch_mutate());
       processMap.put("truncate", new truncate());
+      processMap.put("get_multi_slice", new get_multi_slice());
       processMap.put("describe_schema_versions", new describe_schema_versions());
       processMap.put("describe_keyspaces", new describe_keyspaces());
       processMap.put("describe_cluster_name", new describe_cluster_name());
@@ -3987,6 +4061,34 @@ public class Cassandra {
       }
     }
 
+    public static class get_multi_slice<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_multi_slice_args> {
+      public get_multi_slice() {
+        super("get_multi_slice");
+      }
+
+      public get_multi_slice_args getEmptyArgsInstance() {
+        return new get_multi_slice_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_multi_slice_result getResult(I iface, get_multi_slice_args args) throws org.apache.thrift.TException {
+        get_multi_slice_result result = new get_multi_slice_result();
+        try {
+          result.success = iface.get_multi_slice(args.request);
+        } catch (InvalidRequestException ire) {
+          result.ire = ire;
+        } catch (UnavailableException ue) {
+          result.ue = ue;
+        } catch (TimedOutException te) {
+          result.te = te;
+        }
+        return result;
+      }
+    }
+
     public static class describe_schema_versions<I extends Iface> extends org.apache.thrift.ProcessFunction<I, describe_schema_versions_args> {
       public describe_schema_versions() {
         super("describe_schema_versions");
@@ -4660,6 +4762,7 @@ public class Cassandra {
       processMap.put("batch_mutate", new batch_mutate());
       processMap.put("atomic_batch_mutate", new atomic_batch_mutate());
       processMap.put("truncate", new truncate());
+      processMap.put("get_multi_slice", new get_multi_slice());
       processMap.put("describe_schema_versions", new describe_schema_versions());
       processMap.put("describe_keyspaces", new describe_keyspaces());
       processMap.put("describe_cluster_name", new describe_cluster_name());
@@ -5877,20 +5980,20 @@ public class Cassandra {
       }
     }
 
-    public static class describe_schema_versions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_schema_versions_args, Map<String,List<String>>> {
-      public describe_schema_versions() {
-        super("describe_schema_versions");
+    public static class get_multi_slice<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_multi_slice_args, List<ColumnOrSuperColumn>> {
+      public get_multi_slice() {
+        super("get_multi_slice");
       }
 
-      public describe_schema_versions_args getEmptyArgsInstance() {
-        return new describe_schema_versions_args();
+      public get_multi_slice_args getEmptyArgsInstance() {
+        return new get_multi_slice_args();
       }
 
-      public AsyncMethodCallback<Map<String,List<String>>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<ColumnOrSuperColumn>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Map<String,List<String>>>() { 
-          public void onComplete(Map<String,List<String>> o) {
-            describe_schema_versions_result result = new describe_schema_versions_result();
+        return new AsyncMethodCallback<List<ColumnOrSuperColumn>>() { 
+          public void onComplete(List<ColumnOrSuperColumn> o) {
+            get_multi_slice_result result = new get_multi_slice_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -5903,67 +6006,20 @@ public class Cassandra {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            describe_schema_versions_result result = new describe_schema_versions_result();
+            get_multi_slice_result result = new get_multi_slice_result();
             if (e instanceof InvalidRequestException) {
                         result.ire = (InvalidRequestException) e;
                         result.setIreIsSet(true);
                         msg = result;
             }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, describe_schema_versions_args args, org.apache.thrift.async.AsyncMethodCallback<Map<String,List<String>>> resultHandler) throws TException {
-        iface.describe_schema_versions(resultHandler);
-      }
-    }
-
-    public static class describe_keyspaces<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_keyspaces_args, List<KsDef>> {
-      public describe_keyspaces() {
-        super("describe_keyspaces");
-      }
-
-      public describe_keyspaces_args getEmptyArgsInstance() {
-        return new describe_keyspaces_args();
-      }
-
-      public AsyncMethodCallback<List<KsDef>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<KsDef>>() { 
-          public void onComplete(List<KsDef> o) {
-            describe_keyspaces_result result = new describe_keyspaces_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
+            else             if (e instanceof UnavailableException) {
+                        result.ue = (UnavailableException) e;
+                        result.setUeIsSet(true);
+                        msg = result;
             }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            describe_keyspaces_result result = new describe_keyspaces_result();
-            if (e instanceof InvalidRequestException) {
-                        result.ire = (InvalidRequestException) e;
-                        result.setIreIsSet(true);
+            else             if (e instanceof TimedOutException) {
+                        result.te = (TimedOutException) e;
+                        result.setTeIsSet(true);
                         msg = result;
             }
              else 
@@ -5986,127 +6042,25 @@ public class Cassandra {
         return false;
       }
 
-      public void start(I iface, describe_keyspaces_args args, org.apache.thrift.async.AsyncMethodCallback<List<KsDef>> resultHandler) throws TException {
-        iface.describe_keyspaces(resultHandler);
+      public void start(I iface, get_multi_slice_args args, org.apache.thrift.async.AsyncMethodCallback<List<ColumnOrSuperColumn>> resultHandler) throws TException {
+        iface.get_multi_slice(args.request,resultHandler);
       }
     }
 
-    public static class describe_cluster_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_cluster_name_args, String> {
-      public describe_cluster_name() {
-        super("describe_cluster_name");
-      }
-
-      public describe_cluster_name_args getEmptyArgsInstance() {
-        return new describe_cluster_name_args();
-      }
-
-      public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<String>() { 
-          public void onComplete(String o) {
-            describe_cluster_name_result result = new describe_cluster_name_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            describe_cluster_name_result result = new describe_cluster_name_result();
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, describe_cluster_name_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
-        iface.describe_cluster_name(resultHandler);
-      }
-    }
-
-    public static class describe_version<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_version_args, String> {
-      public describe_version() {
-        super("describe_version");
-      }
-
-      public describe_version_args getEmptyArgsInstance() {
-        return new describe_version_args();
-      }
-
-      public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<String>() { 
-          public void onComplete(String o) {
-            describe_version_result result = new describe_version_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            describe_version_result result = new describe_version_result();
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, describe_version_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
-        iface.describe_version(resultHandler);
-      }
-    }
-
-    public static class describe_ring<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_ring_args, List<TokenRange>> {
-      public describe_ring() {
-        super("describe_ring");
+    public static class describe_schema_versions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_schema_versions_args, Map<String,List<String>>> {
+      public describe_schema_versions() {
+        super("describe_schema_versions");
       }
 
-      public describe_ring_args getEmptyArgsInstance() {
-        return new describe_ring_args();
+      public describe_schema_versions_args getEmptyArgsInstance() {
+        return new describe_schema_versions_args();
       }
 
-      public AsyncMethodCallback<List<TokenRange>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Map<String,List<String>>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<TokenRange>>() { 
-          public void onComplete(List<TokenRange> o) {
-            describe_ring_result result = new describe_ring_result();
+        return new AsyncMethodCallback<Map<String,List<String>>>() { 
+          public void onComplete(Map<String,List<String>> o) {
+            describe_schema_versions_result result = new describe_schema_versions_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -6119,7 +6073,223 @@ public class Cassandra {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            describe_ring_result result = new describe_ring_result();
+            describe_schema_versions_result result = new describe_schema_versions_result();
+            if (e instanceof InvalidRequestException) {
+                        result.ire = (InvalidRequestException) e;
+                        result.setIreIsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, describe_schema_versions_args args, org.apache.thrift.async.AsyncMethodCallback<Map<String,List<String>>> resultHandler) throws TException {
+        iface.describe_schema_versions(resultHandler);
+      }
+    }
+
+    public static class describe_keyspaces<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_keyspaces_args, List<KsDef>> {
+      public describe_keyspaces() {
+        super("describe_keyspaces");
+      }
+
+      public describe_keyspaces_args getEmptyArgsInstance() {
+        return new describe_keyspaces_args();
+      }
+
+      public AsyncMethodCallback<List<KsDef>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<List<KsDef>>() { 
+          public void onComplete(List<KsDef> o) {
+            describe_keyspaces_result result = new describe_keyspaces_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            describe_keyspaces_result result = new describe_keyspaces_result();
+            if (e instanceof InvalidRequestException) {
+                        result.ire = (InvalidRequestException) e;
+                        result.setIreIsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, describe_keyspaces_args args, org.apache.thrift.async.AsyncMethodCallback<List<KsDef>> resultHandler) throws TException {
+        iface.describe_keyspaces(resultHandler);
+      }
+    }
+
+    public static class describe_cluster_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_cluster_name_args, String> {
+      public describe_cluster_name() {
+        super("describe_cluster_name");
+      }
+
+      public describe_cluster_name_args getEmptyArgsInstance() {
+        return new describe_cluster_name_args();
+      }
+
+      public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<String>() { 
+          public void onComplete(String o) {
+            describe_cluster_name_result result = new describe_cluster_name_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            describe_cluster_name_result result = new describe_cluster_name_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, describe_cluster_name_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
+        iface.describe_cluster_name(resultHandler);
+      }
+    }
+
+    public static class describe_version<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_version_args, String> {
+      public describe_version() {
+        super("describe_version");
+      }
+
+      public describe_version_args getEmptyArgsInstance() {
+        return new describe_version_args();
+      }
+
+      public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<String>() { 
+          public void onComplete(String o) {
+            describe_version_result result = new describe_version_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            describe_version_result result = new describe_version_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, describe_version_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
+        iface.describe_version(resultHandler);
+      }
+    }
+
+    public static class describe_ring<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, describe_ring_args, List<TokenRange>> {
+      public describe_ring() {
+        super("describe_ring");
+      }
+
+      public describe_ring_args getEmptyArgsInstance() {
+        return new describe_ring_args();
+      }
+
+      public AsyncMethodCallback<List<TokenRange>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<List<TokenRange>>() { 
+          public void onComplete(List<TokenRange> o) {
+            describe_ring_result result = new describe_ring_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            describe_ring_result result = new describe_ring_result();
             if (e instanceof InvalidRequestException) {
                         result.ire = (InvalidRequestException) e;
                         result.setIreIsSet(true);
@@ -11615,14 +11785,14 @@ public class Cassandra {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list216 = iprot.readListBegin();
-                  struct.success = new ArrayList<ColumnOrSuperColumn>(_list216.size);
-                  for (int _i217 = 0; _i217 < _list216.size; ++_i217)
+                  org.apache.thrift.protocol.TList _list224 = iprot.readListBegin();
+                  struct.success = new ArrayList<ColumnOrSuperColumn>(_list224.size);
+                  for (int _i225 = 0; _i225 < _list224.size; ++_i225)
                   {
-                    ColumnOrSuperColumn _elem218;
-                    _elem218 = new ColumnOrSuperColumn();
-                    _elem218.read(iprot);
-                    struct.success.add(_elem218);
+                    ColumnOrSuperColumn _elem226;
+                    _elem226 = new ColumnOrSuperColumn();
+                    _elem226.read(iprot);
+                    struct.success.add(_elem226);
                   }
                   iprot.readListEnd();
                 }
@@ -11677,9 +11847,9 @@ public class Cassandra {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (ColumnOrSuperColumn _iter219 : struct.success)
+            for (ColumnOrSuperColumn _iter227 : struct.success)
             {
-              _iter219.write(oprot);
+              _iter227.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -11734,9 +11904,9 @@ public class Cassandra {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (ColumnOrSuperColumn _iter220 : struct.success)
+            for (ColumnOrSuperColumn _iter228 : struct.success)
             {
-              _iter220.write(oprot);
+              _iter228.write(oprot);
             }
           }
         }
@@ -11757,14 +11927,14 @@ public class Cassandra {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<ColumnOrSuperColumn>(_list221.size);
-            for (int _i222 = 0; _i222 < _list221.size; ++_i222)
+            org.apache.thrift.protocol.TList _list229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<ColumnOrSuperColumn>(_list229.size);
+            for (int _i230 = 0; _i230 < _list229.size; ++_i230)
             {
-              ColumnOrSuperColumn _elem223;
-              _elem223 = new ColumnOrSuperColumn();
-              _elem223.read(iprot);
-              struct.success.add(_elem223);
+              ColumnOrSuperColumn _elem231;
+              _elem231 = new ColumnOrSuperColumn();
+              _elem231.read(iprot);
+              struct.success.add(_elem231);
             }
           }
           struct.setSuccessIsSet(true);
@@ -13764,13 +13934,13 @@ public class Cassandra {
             case 1: // KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list224 = iprot.readListBegin();
-                  struct.keys = new ArrayList<ByteBuffer>(_list224.size);
-                  for (int _i225 = 0; _i225 < _list224.size; ++_i225)
+                  org.apache.thrift.protocol.TList _list232 = iprot.readListBegin();
+                  struct.keys = new ArrayList<ByteBuffer>(_list232.size);
+                  for (int _i233 = 0; _i233 < _list232.size; ++_i233)
                   {
-                    ByteBuffer _elem226;
-                    _elem226 = iprot.readBinary();
-                    struct.keys.add(_elem226);
+                    ByteBuffer _elem234;
+                    _elem234 = iprot.readBinary();
+                    struct.keys.add(_elem234);
                   }
                   iprot.readListEnd();
                 }
@@ -13824,9 +13994,9 @@ public class Cassandra {
           oprot.writeFieldBegin(KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.keys.size()));
-            for (ByteBuffer _iter227 : struct.keys)
+            for (ByteBuffer _iter235 : struct.keys)
             {
-              oprot.writeBinary(_iter227);
+              oprot.writeBinary(_iter235);
             }
             oprot.writeListEnd();
           }
@@ -13866,9 +14036,9 @@ public class Cassandra {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         {
           oprot.writeI32(struct.keys.size());
-          for (ByteBuffer _iter228 : struct.keys)
+          for (ByteBuffer _iter236 : struct.keys)
           {
-            oprot.writeBinary(_iter228);
+            oprot.writeBinary(_iter236);
           }
         }
         struct.column_parent.write(oprot);
@@ -13880,13 +14050,13 @@ public class Cassandra {
       public void read(org.apache.thrift.protocol.TProtocol prot, multiget_slice_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         {
-          org.apache.thrift.protocol.TList _list229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.keys = new ArrayList<ByteBuffer>(_list229.size);
-          for (int _i230 = 0; _i230 < _list229.size; ++_i230)
+          org.apache.thrift.protocol.TList _list237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.keys = new ArrayList<ByteBuffer>(_list237.size);
+          for (int _i238 = 0; _i238 < _list237.size; ++_i238)
           {
-            ByteBuffer _elem231;
-            _elem231 = iprot.readBinary();
-            struct.keys.add(_elem231);
+            ByteBuffer _elem239;
+            _elem239 = iprot.readBinary();
+            struct.keys.add(_elem239);
           }
         }
         struct.setKeysIsSet(true);
@@ -14475,26 +14645,26 @@ public class Cassandra {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map232 = iprot.readMapBegin();
-                  struct.success = new HashMap<ByteBuffer,List<ColumnOrSuperColumn>>(2*_map232.size);
-                  for (int _i233 = 0; _i233 < _map232.size; ++_i233)
+                  org.apache.thrift.protocol.TMap _map240 = iprot.readMapBegin();
+                  struct.success = new HashMap<ByteBuffer,List<ColumnOrSuperColumn>>(2*_map240.size);
+                  for (int _i241 = 0; _i241 < _map240.size; ++_i241)
                   {
-                    ByteBuffer _key234;
-                    List<ColumnOrSuperColumn> _val235;
-                    _key234 = iprot.readBinary();
+                    ByteBuffer _key242;
+                    List<ColumnOrSuperColumn> _val243;
+                    _key242 = iprot.readBinary();
                     {
-                      org.apache.thrift.protocol.TList _list236 = iprot.readListBegin();
-                      _val235 = new ArrayList<ColumnOrSuperColumn>(_list236.size);
-                      for (int _i237 = 0; _i237 < _list236.size; ++_i237)
+                      org.apache.thrift.protocol.TList _list244 = iprot.readListBegin();
+                      _val243 = new ArrayList<ColumnOrSuperColumn>(_list244.size);
+                      for (int _i245 = 0; _i245 < _list244.size; ++_i245)
                       {
-                        ColumnOrSuperColumn _elem238;
-                        _elem238 = new ColumnOrSuperColumn();
-                        _elem238.read(iprot);
-                        _val235.add(_elem238);
+                        ColumnOrSuperColumn _elem246;
+                        _elem246 = new ColumnOrSuperColumn();
+                        _elem246.read(iprot);
+                        _val243.add(_elem246);
                       }
                       iprot.readListEnd();
                     }
-                    struct.success.put(_key234, _val235);
+                    struct.success.put(_key242, _val243);
                   }
                   iprot.readMapEnd();
                 }
@@ -14549,14 +14719,14 @@ public class Cassandra {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.success.size()));
-            for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> _iter239 : struct.success.entrySet())
+            for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> _iter247 : struct.success.entrySet())
             {
-              oprot.writeBinary(_iter239.getKey());
+              oprot.writeBinary(_iter247.getKey());
               {
-                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter239.getValue().size()));
-                for (ColumnOrSuperColumn _iter240 : _iter239.getValue())
+                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter247.getValue().size()));
+                for (ColumnOrSuperColumn _iter248 : _iter247.getValue())
                 {
-                  _iter240.write(oprot);
+                  _iter248.write(oprot);
                 }
                 oprot.writeListEnd();
               }
@@ -14614,14 +14784,14 @@ public class Cassandra {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> _iter241 : struct.success.entrySet())
+            for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> _iter249 : struct.success.entrySet())
             {
-              oprot.writeBinary(_iter241.getKey());
+              oprot.writeBinary(_iter249.getKey());
               {
-                oprot.writeI32(_iter241.getValue().size());
-                for (ColumnOrSuperColumn _iter242 : _iter241.getValue())
+                oprot.writeI32(_iter249.getValue().size());
+                for (ColumnOrSuperColumn _iter250 : _iter249.getValue())
                 {
-                  _iter242.write(oprot);
+                  _iter250.write(oprot);
                 }
               }
             }
@@ -14644,25 +14814,25 @@ public class Cassandra {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TMap _map243 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
-            struct.success = new HashMap<ByteBuffer,List<ColumnOrSuperColumn>>(2*_map243.size);
-            for (int _i244 = 0; _i244 < _map243.size; ++_i244)
+            org.apache.thrift.protocol.TMap _map251 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+            struct.success = new HashMap<ByteBuffer,List<ColumnOrSuperColumn>>(2*_map251.size);
+            for (int _i252 = 0; _i252 < _map251.size; ++_i252)
             {
-              ByteBuffer _key245;
-              List<ColumnOrSuperColumn> _val246;
-              _key245 = iprot.readBinary();
+              ByteBuffer _key253;
+              List<ColumnOrSuperColumn> _val254;
+              _key253 = iprot.readBinary();
               {
-                org.apache.thrift.protocol.TList _list247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-                _val246 = new ArrayList<ColumnOrSuperColumn>(_list247.size);
-                for (int _i248 = 0; _i248 < _list247.size; ++_i248)
+                org.apache.thrift.protocol.TList _list255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+                _val254 = new ArrayList<ColumnOrSuperColumn>(_list255.size);
+                for (int _i256 = 0; _i256 < _list255.size; ++_i256)
                 {
-                  ColumnOrSuperColumn _elem249;
-                  _elem249 = new ColumnOrSuperColumn();
-                  _elem249.read(iprot);
-                  _val246.add(_elem249);
+                  ColumnOrSuperColumn _elem257;
+                  _elem257 = new ColumnOrSuperColumn();
+                  _elem257.read(iprot);
+                  _val254.add(_elem257);
                 }
               }
-              struct.success.put(_key245, _val246);
+              struct.success.put(_key253, _val254);
             }
           }
           struct.setSuccessIsSet(true);
@@ -15283,13 +15453,13 @@ public class Cassandra {
             case 1: // KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list250 = iprot.readListBegin();
-                  struct.keys = new ArrayList<ByteBuffer>(_list250.size);
-                  for (int _i251 = 0; _i251 < _list250.size; ++_i251)
+                  org.apache.thrift.protocol.TList _list258 = iprot.readListBegin();
+                  struct.keys = new ArrayList<ByteBuffer>(_list258.size);
+                  for (int _i259 = 0; _i259 < _list258.size; ++_i259)
                   {
-                    ByteBuffer _elem252;
-                    _elem252 = iprot.readBinary();
-                    struct.keys.add(_elem252);
+                    ByteBuffer _elem260;
+                    _elem260 = iprot.readBinary();
+                    struct.keys.add(_elem260);
                   }
                   iprot.readListEnd();
                 }
@@ -15343,9 +15513,9 @@ public class Cassandra {
           oprot.writeFieldBegin(KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.keys.size()));
-            for (ByteBuffer _iter253 : struct.keys)
+            for (ByteBuffer _iter261 : struct.keys)
             {
-              oprot.writeBinary(_iter253);
+              oprot.writeBinary(_iter261);
             }
             oprot.writeListEnd();
           }
@@ -15385,9 +15555,9 @@ public class Cassandra {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         {
           oprot.writeI32(struct.keys.size());
-          for (ByteBuffer _iter254 : struct.keys)
+          for (ByteBuffer _iter262 : struct.keys)
           {
-            oprot.writeBinary(_iter254);
+            oprot.writeBinary(_iter262);
           }
         }
         struct.column_parent.write(oprot);
@@ -15399,13 +15569,13 @@ public class Cassandra {
       public void read(org.apache.thrift.protocol.TProtocol prot, multiget_count_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         {
-          org.apache.thrift.protocol.TList _list255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.keys = new ArrayList<ByteBuffer>(_list255.size);
-          for (int _i256 = 0; _i256 < _list255.size; ++_i256)
+          org.apache.thrift.protocol.TList _list263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.keys = new ArrayList<ByteBuffer>(_list263.size);
+          for (int _i264 = 0; _i264 < _list263.size; ++_i264)
           {
-            ByteBuffer _elem257;
-            _elem257 = iprot.readBinary();
-            struct.keys.add(_elem257);
+            ByteBuffer _elem265;
+            _elem265 = iprot.readBinary();
+            struct.keys.add(_elem265);
           }
         }
         struct.setKeysIsSet(true);
@@ -15978,15 +16148,15 @@ public class Cassandra {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map258 = iprot.readMapBegin();
-                  struct.success = new HashMap<ByteBuffer,Integer>(2*_map258.size);
-                  for (int _i259 = 0; _i259 < _map258.size; ++_i259)
+                  org.apache.thrift.protocol.TMap _map266 = iprot.readMapBegin();
+                  struct.success = new HashMap<ByteBuffer,Integer>(2*_map266.size);
+                  for (int _i267 = 0; _i267 < _map266.size; ++_i267)
                   {
-                    ByteBuffer _key260;
-                    int _val261;
-                    _key260 = iprot.readBinary();
-                    _val261 = iprot.readI32();
-                    struct.success.put(_key260, _val261);
+                    ByteBuffer _key268;
+                    int _val269;
+                    _key268 = iprot.readBinary();
+                    _val269 = iprot.readI32();
+                    struct.success.put(_key268, _val269);
                   }
                   iprot.readMapEnd();
                 }
@@ -16041,10 +16211,10 @@ public class Cassandra {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.success.size()));
-            for (Map.Entry<ByteBuffer, Integer> _iter262 : struct.success.entrySet())
+            for (Map.Entry<ByteBuffer, Integer> _iter270 : struct.success.entrySet())
             {
-              oprot.writeBinary(_iter262.getKey());
-              oprot.writeI32(_iter262.getValue());
+              oprot.writeBinary(_iter270.getKey());
+              oprot.writeI32(_iter270.getValue());
             }
             oprot.writeMapEnd();
           }
@@ -16099,10 +16269,10 @@ public class Cassandra {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Map.Entry<ByteBuffer, Integer> _iter263 : struct.success.entrySet())
+            for (Map.Entry<ByteBuffer, Integer> _iter271 : struct.success.entrySet())
             {
-              oprot.writeBinary(_iter263.getKey());
-              oprot.writeI32(_iter263.getValue());
+              oprot.writeBinary(_iter271.getKey());
+              oprot.writeI32(_iter271.getValue());
             }
           }
         }
@@ -16123,15 +16293,15 @@ public class Cassandra {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TMap _map264 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
-            struct.success = new HashMap<ByteBuffer,Integer>(2*_map264.size);
-            for (int _i265 = 0; _i265 < _map264.size; ++_i265)
+            org.apache.thrift.protocol.TMap _map272 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
+            struct.success = new HashMap<ByteBuffer,Integer>(2*_map272.size);
+            for (int _i273 = 0; _i273 < _map272.size; ++_i273)
             {
-              ByteBuffer _key266;
-              int _val267;
-              _key266 = iprot.readBinary();
-              _val267 = iprot.readI32();
-              struct.success.put(_key266, _val267);
+              ByteBuffer _key274;
+              int _val275;
+              _key274 = iprot.readBinary();
+              _val275 = iprot.readI32();
+              struct.success.put(_key274, _val275);
             }
           }
           struct.setSuccessIsSet(true);
@@ -17409,14 +17579,14 @@ public class Cassandra {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list268 = iprot.readListBegin();
-                  struct.success = new ArrayList<KeySlice>(_list268.size);
-                  for (int _i269 = 0; _i269 < _list268.size; ++_i269)
+                  org.apache.thrift.protocol.TList _list276 = iprot.readListBegin();
+                  struct.success = new ArrayList<KeySlice>(_list276.size);
+                  for (int _i277 = 0; _i277 < _list276.size; ++_i277)
                   {
-                    KeySlice _elem270;
-                    _elem270 = new KeySlice();
-                    _elem270.read(iprot);
-                    struct.success.add(_elem270);
+                    KeySlice _elem278;
+                    _elem278 = new KeySlice();
+                    _elem278.read(iprot);
+                    struct.success.add(_elem278);
                   }
                   iprot.readListEnd();
                 }
@@ -17471,9 +17641,9 @@ public class Cassandra {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (KeySlice _iter271 : struct.success)
+            for (KeySlice _iter279 : struct.success)
             {
-              _iter271.write(oprot);
+              _iter279.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -17528,9 +17698,9 @@ public class Cassandra {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (KeySlice _iter272 : struct.success)
+            for (KeySlice _iter280 : struct.success)
             {
-              _iter272.write(oprot);
+              _iter280.write(oprot);
             }
           }
         }
@@ -17551,14 +17721,14 @@ public class Cassandra {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<KeySlice>(_list273.size);
-            for (int _i274 = 0; _i274 < _list273.size; ++_i274)
+            org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<KeySlice>(_list281.size);
+            for (int _i282 = 0; _i282 < _list281.size; ++_i282)
             {
-              KeySlice _elem275;
-              _elem275 = new KeySlice();
-              _elem275.read(iprot);
-              struct.success.add(_elem275);
+              KeySlice _elem283;
+              _elem283 = new KeySlice();
+              _elem283.read(iprot);
+              struct.success.add(_elem283);
             }
           }
           struct.setSuccessIsSet(true);
@@ -18837,14 +19007,14 @@ public class Cassandra {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list276 = iprot.readListBegin();
-                  struct.success = new ArrayList<KeySlice>(_list276.size);
-                  for (int _i277 = 0; _i277 < _list276.size; ++_i277)
+                  org.apache.thrift.protocol.TList _list284 = iprot.readListBegin();
+                  struct.success = new ArrayList<KeySlice>(_list284.size);
+                  for (int _i285 = 0; _i285 < _list284.size; ++_i285)
                   {
-                    KeySlice _elem278;
-                    _elem278 = new KeySlice();
-                    _elem278.read(iprot);
-                    struct.success.add(_elem278);
+                    KeySlice _elem286;
+                    _elem286 = new KeySlice();
+                    _elem286.read(iprot);
+                    struct.success.add(_elem286);
                   }
                   iprot.readListEnd();
                 }
@@ -18899,9 +19069,9 @@ public class Cassandra {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (KeySlice _iter279 : struct.success)
+            for (KeySlice _iter287 : struct.success)
             {
-              _iter279.write(oprot);
+              _iter287.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -18956,9 +19126,9 @@ public class Cassandra {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (KeySlice _iter280 : struct.success)
+            for (KeySlice _iter288 : struct.success)
             {
-              _iter280.write(oprot);
+              _iter288.write(oprot);
             }
           }
         }
@@ -18979,14 +19149,14 @@ public class Cassandra {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<KeySlice>(_list281.size);
-            for (int _i282 = 0; _i282 < _list281.size; ++_i282)
+            org.apache.thrift.protocol.TList _list289 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<KeySlice>(_list289.size);
+            for (int _i290 = 0; _i290 < _list289.size; ++_i290)
             {
-              KeySlice _elem283;
-              _elem283 = new KeySlice();
-              _elem283.read(iprot);
-              struct.success.add(_elem283);
+              KeySlice _elem291;
+              _elem291 = new KeySlice();
+              _elem291.read(iprot);
+              struct.success.add(_elem291);
             }
           }
           struct.setSuccessIsSet(true);
@@ -20264,14 +20434,14 @@ public class Cassandra {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list284 = iprot.readListBegin();
-                  struct.success = new ArrayList<KeySlice>(_list284.size);
-                  for (int _i285 = 0; _i285 < _list284.size; ++_i285)
+                  org.apache.thrift.protocol.TList _list292 = iprot.readListBegin();
+                  struct.success = new ArrayList<KeySlice>(_list292.size);
+                  for (int _i293 = 0; _i293 < _list292.size; ++_i293)
                   {
-                    KeySlice _elem286;
-                    _elem286 = new KeySlice();
-                    _elem286.read(iprot);
-                    struct.success.add(_elem286);
+                    KeySlice _elem294;
+                    _elem294 = new KeySlice();
+                    _elem294.read(iprot);
+                    struct.success.add(_elem294);
                   }
                   iprot.readListEnd();
                 }
@@ -20326,9 +20496,9 @@ public class Cassandra {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (KeySlice _iter287 : struct.success)
+            for (KeySlice _iter295 : struct.success)
             {
-              _iter287.write(oprot);
+              _iter295.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -20383,9 +20553,9 @@ public class Cassandra {
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (KeySlice _iter288 : struct.success)
+            for (KeySlice _iter296 : struct.success)
             {
-              _iter288.write(oprot);
+              _iter296.write(oprot);
             }
           }
         }
@@ -20406,14 +20576,14 @@ public class Cassandra {
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list289 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<KeySlice>(_list289.size);
-            for (int _i290 = 0; _i290 < _list289.size; ++_i290)
+            org.apache.thrift.protocol.TList _list297 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<KeySlice>(_list297.size);
+            for (int _i298 = 0; _i298 < _list297.size; ++_i298)
             {
-              KeySlice _elem291;
-              _elem291 = new KeySlice();
-              _elem291.read(iprot);
-              struct.success.add(_elem291);
+              KeySlice _elem299;
+              _elem299 = new KeySlice();
+              _elem299.read(iprot);
+              struct.success.add(_elem299);
             }
           }
           struct.setSuccessIsSet(true);
@@ -23809,14 +23979,14 @@ public class Cassandra {
             case 3: // EXPECTED
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list292 = iprot.readListBegin();
-                  struct.expected = new ArrayList<Column>(_list292.size);
-                  for (int _i293 = 0; _i293 < _list292.size; ++_i293)
+                  org.apache.thrift.protocol.TList _list300 = iprot.readListBegin();
+                  struct.expected = new ArrayList<Column>(_list300.size);
+                  for (int _i301 = 0; _i301 < _list300.size; ++_i301)
                   {
-                    Column _elem294;
-                    _elem294 = new Column();
-                    _elem294.read(iprot);
-                    struct.expected.add(_elem294);
+                    Column _elem302;
+                    _elem302 = new Column();
+                    _elem302.read(iprot);
+                    struct.expected.add(_elem302);
                   }
                   iprot.readListEnd();
                 }
@@ -23828,14 +23998,14 @@ public class Cassandra {
             case 4: // UPDATES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list295 = iprot.readListBegin();
-                  struct.updates = new ArrayList<Column>(_list295.size);
-                  for (int _i296 = 0; _i296 < _list295.size; ++_i296)
+                  org.apache.thrift.protocol.TList _list303 = iprot.readListBegin();
+                  struct.updates = new ArrayList<Column>(_list303.size);
+                  for (int _i304 = 0; _i304 < _list303.size; ++_i304)
                   {
-                    Column _elem297;
-                    _elem297 = new Column();
-                    _elem297.read(iprot);
-                    struct.updates.add(_elem297);
+                    Column _elem305;
+                    _elem305 = new Column();
+                    _elem305.read(iprot);
+                    struct.updates.add(_elem305);
                   }
                   iprot.readListEnd();
                 }
@@ -23889,9 +24059,9 @@ public class Cassandra {
           oprot.writeFieldBegin(EXPECTED_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.expected.size()));
-            for (Column _iter298 : struct.expected)
+            for (Column _iter306 : struct.expected)
             {
-              _iter298.write(oprot);
+              _iter306.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -23901,9 +24071,9 @@ public class Cassandra {
           oprot.writeFieldBegin(UPDATES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.updates.size()));
-            for (Column _iter299 : struct.updates)
+            for (Column _iter307 : struct.updates)
             {
-              _iter299.write(oprot);
+              _iter307.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -23951,18 +24121,18 @@ public class Cassandra {
         if (struct.isSetExpected()) {
           {
             oprot.writeI32(struct.expected.size());
-            for (Column _iter300 : struct.expected)
+            for (Column _iter308 : struct.expected)
             {
-              _iter300.write(oprot);
+              _iter308.write(oprot);
             }
           }
         }
         if (struct.isSetUpdates()) {
           {
             oprot.writeI32(struct.updates.size());
-            for (Column _iter301 : struct.updates)
+            for (Column _iter309 : struct.updates)
             {
-              _iter301.write(oprot);
+              _iter309.write(oprot);
             }
           }
         }
@@ -23982,28 +24152,28 @@ public class Cassandra {
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list302 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.expected = new ArrayList<Column>(_list302.size);
-            for (int _i303 = 0; _i303 < _list302.size; ++_i303)
+            org.apache.thrift.protocol.TList _list310 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.expected = new ArrayList<Column>(_list310.size);
+            for (int _i311 = 0; _i311 < _list310.size; ++_i311)
             {
-              Column _elem304;
-              _elem304 = new Column();
-              _elem304.read(iprot);
-              struct.expected.add(_elem304);
+              Column _elem312;
+              _elem312 = new Column();
+              _elem312.read(iprot);
+              struct.expected.add(_elem312);
             }
           }
           struct.setExpectedIsSet(true);
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list305 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.updates = new ArrayList<Column>(_list305.size);
-            for (int _i306 = 0; _i306 < _list305.size; ++_i306)
+            org.apache.thrift.protocol.TList _list313 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.updates = new ArrayList<Column>(_list313.size);
+            for (int _i314 = 0; _i314 < _list313.size; ++_i314)
             {
-              Column _elem307;
-              _elem307 = new Column();
-              _elem307.read(iprot);
-              struct.updates.add(_elem307);
+              Column _elem315;
+              _elem315 = new Column();
+              _elem315.read(iprot);
+              struct.updates.add(_elem315);
             }
           }
           struct.setUpdatesIsSet(true);
@@ -27589,38 +27759,38 @@ public class Cassandra {
             case 1: // MUTATION_MAP
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map308 = iprot.readMapBegin();
-                  struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map308.size);
-                  for (int _i309 = 0; _i309 < _map308.size; ++_i309)
+                  org.apache.thrift.protocol.TMap _map316 = iprot.readMapBegin();
+                  struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map316.size);
+                  for (int _i317 = 0; _i317 < _map316.size; ++_i317)
                   {
-                    ByteBuffer _key310;
-                    Map<String,List<Mutation>> _val311;
-                    _key310 = iprot.readBinary();
+                    ByteBuffer _key318;
+                    Map<String,List<Mutation>> _val319;
+                    _key318 = iprot.readBinary();
                     {
-                      org.apache.thrift.protocol.TMap _map312 = iprot.readMapBegin();
-                      _val311 = new HashMap<String,List<Mutation>>(2*_map312.size);
-                      for (int _i313 = 0; _i313 < _map312.size; ++_i313)
+                      org.apache.thrift.protocol.TMap _map320 = iprot.readMapBegin();
+                      _val319 = new HashMap<String,List<Mutation>>(2*_map320.size);
+                      for (int _i321 = 0; _i321 < _map320.size; ++_i321)
                       {
-                        String _key314;
-                        List<Mutation> _val315;
-                        _key314 = iprot.readString();
+                        String _key322;
+                        List<Mutation> _val323;
+                        _key322 = iprot.readString();
                         {
-                          org.apache.thrift.protocol.TList _list316 = iprot.readListBegin();
-                          _val315 = new ArrayList<Mutation>(_list316.size);
-                          for (int _i317 = 0; _i317 < _list316.size; ++_i317)
+                          org.apache.thrift.protocol.TList _list324 = iprot.readListBegin();
+                          _val323 = new ArrayList<Mutation>(_list324.size);
+                          for (int _i325 = 0; _i325 < _list324.size; ++_i325)
                           {
-                            Mutation _elem318;
-                            _elem318 = new Mutation();
-                            _elem318.read(iprot);
-                            _val315.add(_elem318);
+                            Mutation _elem326;
+                            _elem326 = new Mutation();
+                            _elem326.read(iprot);
+                            _val323.add(_elem326);
                           }
                           iprot.readListEnd();
                         }
-                        _val311.put(_key314, _val315);
+                        _val319.put(_key322, _val323);
                       }
                       iprot.readMapEnd();
                     }
-                    struct.mutation_map.put(_key310, _val311);
+                    struct.mutation_map.put(_key318, _val319);
                   }
                   iprot.readMapEnd();
                 }
@@ -27656,19 +27826,19 @@ public class Cassandra {
           oprot.writeFieldBegin(MUTATION_MAP_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.mutation_map.size()));
-            for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter319 : struct.mutation_map.entrySet())
+            for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter327 : struct.mutation_map.entrySet())
             {
-              oprot.writeBinary(_iter319.getKey());
+              oprot.writeBinary(_iter327.getKey());
               {
-                oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, _iter319.getValue().size()));
-                for (Map.Entry<String, List<Mutation>> _iter320 : _iter319.getValue().entrySet())
+                oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, _iter327.getValue().size()));
+                for (Map.Entry<String, List<Mutation>> _iter328 : _iter327.getValue().entrySet())
                 {
-                  oprot.writeString(_iter320.getKey());
+                  oprot.writeString(_iter328.getKey());
                   {
-                    oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter320.getValue().size()));
-                    for (Mutation _iter321 : _iter320.getValue())
+                    oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter328.getValue().size()));
+                    for (Mutation _iter329 : _iter328.getValue())
                     {
-                      _iter321.write(oprot);
+                      _iter329.write(oprot);
                     }
                     oprot.writeListEnd();
                   }
@@ -27704,19 +27874,19 @@ public class Cassandra {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         {
           oprot.writeI32(struct.mutation_map.size());
-          for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter322 : struct.mutation_map.entrySet())
+          for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter330 : struct.mutation_map.entrySet())
           {
-            oprot.writeBinary(_iter322.getKey());
+            oprot.writeBinary(_iter330.getKey());
             {
-              oprot.writeI32(_iter322.getValue().size());
-              for (Map.Entry<String, List<Mutation>> _iter323 : _iter322.getValue().entrySet())
+              oprot.writeI32(_iter330.getValue().size());
+              for (Map.Entry<String, List<Mutation>> _iter331 : _iter330.getValue().entrySet())
               {
-                oprot.writeString(_iter323.getKey());
+                oprot.writeString(_iter331.getKey());
                 {
-                  oprot.writeI32(_iter323.getValue().size());
-                  for (Mutation _iter324 : _iter323.getValue())
+                  oprot.writeI32(_iter331.getValue().size());
+                  for (Mutation _iter332 : _iter331.getValue())
                   {
-                    _iter324.write(oprot);
+                    _iter332.write(oprot);
                   }
                 }
               }
@@ -27730,36 +27900,36 @@ public class Cassandra {
       public void read(org.apache.thrift.protocol.TProtocol prot, batch_mutate_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         {
-          org.apache.thrift.protocol.TMap _map325 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-          struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map325.size);
-          for (int _i326 = 0; _i326 < _map325.size; ++_i326)
+          org.apache.thrift.protocol.TMap _map333 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
+          struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map333.size);
+          for (int _i334 = 0; _i334 < _map333.size; ++_i334)
           {
-            ByteBuffer _key327;
-            Map<String,List<Mutation>> _val328;
-            _key327 = iprot.readBinary();
+            ByteBuffer _key335;
+            Map<String,List<Mutation>> _val336;
+            _key335 = iprot.readBinary();
             {
-              org.apache.thrift.protocol.TMap _map329 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
-              _val328 = new HashMap<String,List<Mutation>>(2*_map329.size);
-              for (int _i330 = 0; _i330 < _map329.size; ++_i330)
+              org.apache.thrift.protocol.TMap _map337 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+              _val336 = new HashMap<String,List<Mutation>>(2*_map337.size);
+              for (int _i338 = 0; _i338 < _map337.size; ++_i338)
               {
-                String _key331;
-                List<Mutation> _val332;
-                _key331 = iprot.readString();
+                String _key339;
+                List<Mutation> _val340;
+                _key339 = iprot.readString();
                 {
-                  org.apache.thrift.protocol.TList _list333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-                  _val332 = new ArrayList<Mutation>(_list333.size);
-                  for (int _i334 = 0; _i334 < _list333.size; ++_i334)
+                  org.apache.thrift.protocol.TList _list341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+                  _val340 = new ArrayList<Mutation>(_list341.size);
+                  for (int _i342 = 0; _i342 < _list341.size; ++_i342)
                   {
-                    Mutation _elem335;
-                    _elem335 = new Mutation();
-                    _elem335.read(iprot);
-                    _val332.add(_elem335);
+                    Mutation _elem343;
+                    _elem343 = new Mutation();
+                    _elem343.read(iprot);
+                    _val340.add(_elem343);
                   }
                 }
-                _val328.put(_key331, _val332);
+                _val336.put(_key339, _val340);
               }
             }
-            struct.mutation_map.put(_key327, _val328);
+            struct.mutation_map.put(_key335, _val336);
           }
         }
         struct.setMutation_mapIsSet(true);
@@ -28793,38 +28963,38 @@ public class Cassandra {
             case 1: // MUTATION_MAP
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map336 = iprot.readMapBegin();
-                  struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map336.size);
-                  for (int _i337 = 0; _i337 < _map336.size; ++_i337)
+                  org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin();
+                  struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map344.size);
+                  for (int _i345 = 0; _i345 < _map344.size; ++_i345)
                   {
-                    ByteBuffer _key338;
-                    Map<String,List<Mutation>> _val339;
-                    _key338 = iprot.readBinary();
+                    ByteBuffer _key346;
+                    Map<String,List<Mutation>> _val347;
+                    _key346 = iprot.readBinary();
                     {
-                      org.apache.thrift.protocol.TMap _map340 = iprot.readMapBegin();
-                      _val339 = new HashMap<String,List<Mutation>>(2*_map340.size);
-                      for (int _i341 = 0; _i341 < _map340.size; ++_i341)
+                      org.apache.thrift.protocol.TMap _map348 = iprot.readMapBegin();
+                      _val347 = new HashMap<String,List<Mutation>>(2*_map348.size);
+                      for (int _i349 = 0; _i349 < _map348.size; ++_i349)
                       {
-                        String _key342;
-                        List<Mutation> _val343;
-                        _key342 = iprot.readString();
+                        String _key350;
+                        List<Mutation> _val351;
+                        _key350 = iprot.readString();
                         {
-                          org.apache.thrift.protocol.TList _list344 = iprot.readListBegin();
-                          _val343 = new ArrayList<Mutation>(_list344.size);
-                          for (int _i345 = 0; _i345 < _list344.size; ++_i345)
+                          org.apache.thrift.protocol.TList _list352 = iprot.readListBegin();
+                          _val351 = new ArrayList<Mutation>(_list352.size);
+                          for (int _i353 = 0; _i353 < _list352.size; ++_i353)
                           {
-                            Mutation _elem346;
-                            _elem346 = new Mutation();
-                            _elem346.read(iprot);
-                            _val343.add(_elem346);
+                            Mutation _elem354;
+                            _elem354 = new Mutation();
+                            _elem354.read(iprot);
+                            _val351.add(_elem354);
                           }
                           iprot.readListEnd();
                         }
-                        _val339.put(_key342, _val343);
+                        _val347.put(_key350, _val351);
                       }
                       iprot.readMapEnd();
                     }
-                    struct.mutation_map.put(_key338, _val339);
+                    struct.mutation_map.put(_key346, _val347);
                   }
                   iprot.readMapEnd();
                 }
@@ -28860,19 +29030,19 @@ public class Cassandra {
           oprot.writeFieldBegin(MUTATION_MAP_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.mutation_map.size()));
-            for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter347 : struct.mutation_map.entrySet())
+            for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter355 : struct.mutation_map.entrySet())
             {
-              oprot.writeBinary(_iter347.getKey());
+              oprot.writeBinary(_iter355.getKey());
               {
-                oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, _iter347.getValue().size()));
-                for (Map.Entry<String, List<Mutation>> _iter348 : _iter347.getValue().entrySet())
+                oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, _iter355.getValue().size()));
+                for (Map.Entry<String, List<Mutation>> _iter356 : _iter355.getValue().entrySet())
                 {
-                  oprot.writeString(_iter348.getKey());
+                  oprot.writeString(_iter356.getKey());
                   {
-                    oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter348.getValue().size()));
-                    for (Mutation _iter349 : _iter348.getValue())
+                    oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter356.getValue().size()));
+                    for (Mutation _iter357 : _iter356.getValue())
                     {
-                      _iter349.write(oprot);
+                      _iter357.write(oprot);
                     }
                     oprot.writeListEnd();
                   }
@@ -28908,19 +29078,19 @@ public class Cassandra {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         {
           oprot.writeI32(struct.mutation_map.size());
-          for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter350 : struct.mutation_map.entrySet())
+          for (Map.Entry<ByteBuffer, Map<String,List<Mutation>>> _iter358 : struct.mutation_map.entrySet())
           {
-            oprot.writeBinary(_iter350.getKey());
+            oprot.writeBinary(_iter358.getKey());
             {
-              oprot.writeI32(_iter350.getValue().size());
-              for (Map.Entry<String, List<Mutation>> _iter351 : _iter350.getValue().entrySet())
+              oprot.writeI32(_iter358.getValue().size());
+              for (Map.Entry<String, List<Mutation>> _iter359 : _iter358.getValue().entrySet())
               {
-                oprot.writeString(_iter351.getKey());
+                oprot.writeString(_iter359.getKey());
                 {
-                  oprot.writeI32(_iter351.getValue().size());
-                  for (Mutation _iter352 : _iter351.getValue())
+                  oprot.writeI32(_iter359.getValue().size());
+                  for (Mutation _iter360 : _iter359.getValue())
                   {
-                    _iter352.write(oprot);
+                    _iter360.write(oprot);
                   }
                 }
               }
@@ -28934,36 +29104,36 @@ public class Cassandra {
       public void read(org.apache.thrift.protocol.TProtocol prot, atomic_batch_mutate_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         {
-          org.apache.thrift.protocol.TMap _map353 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-          struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map353.size);
-          for (int _i354 = 0; _i354 < _map353.size; ++_i354)
+          org.apache.thrift.protocol.TMap _map361 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
+          struct.mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>(2*_map361.size);
+          for (int _i362 = 0; _i362 < _map361.size; ++_i362)
           {
-            ByteBuffer _key355;
-            Map<String,List<Mutation>> _val356;
-            _key355 = iprot.readBinary();
+            ByteBuffer _key363;
+            Map<String,List<Mutation>> _val364;
+            _key363 = iprot.readBinary();
             {
-              org.apache.thrift.protocol.TMap _map357 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
-              _val356 = new HashMap<String,List<Mutation>>(2*_map357.size);
-              for (int _i358 = 0; _i358 < _map357.size; ++_i358)
+              org.apache.thrift.protocol.TMap _map365 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+              _val364 = new HashMap<String,List<Mutation>>(2*_map365.size);
+              for (int _i366 = 0; _i366 < _map365.size; ++_i366)
               {
-                String _key359;
-                List<Mutation> _val360;
-                _key359 = iprot.readString();
+                String _key367;
+                List<Mutation> _val368;
+                _key367 = iprot.readString();
                 {
-                  org.apache.thrift.protocol.TList _list361 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-                  _val360 = new ArrayList<Mutation>(_list361.size);
-                  for (int _i362 = 0; _i362 < _list361.size; ++_i362)
+                  org.apache.thrift.protocol.TList _list369 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+                  _val368 = new ArrayList<Mutation>(_list369.size);
+                  for (int _i370 = 0; _i370 < _list369.size; ++_i370)
                   {
-                    Mutation _elem363;
-                    _elem363 = new Mutation();
-                    _elem363.read(iprot);
-                    _val360.add(_elem363);
+                    Mutation _elem371;
+                    _elem371 = new Mutation();
+                    _elem371.read(iprot);
+                    _val368.add(_elem371);
                   }
                 }
-                _val356.put(_key359, _val360);
+                _val364.put(_key367, _val368);
               }
             }
-            struct.mutation_map.put(_key355, _val356);
+            struct.mutation_map.put(_key363, _val364);
           }
         }
         struct.setMutation_mapIsSet(true);
@@ -30482,20 +30652,22 @@ public class Cassandra {
 
   }
 
-  public static class describe_schema_versions_args implements org.apache.thrift.TBase<describe_schema_versions_args, describe_schema_versions_args._Fields>, java.io.Serializable, Cloneable, Comparable<describe_schema_versions_args>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("describe_schema_versions_args");
+  public static class get_multi_slice_args implements org.apache.thrift.TBase<get_multi_slice_args, get_multi_slice_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_multi_slice_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_multi_slice_args");
 
+    private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new describe_schema_versions_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new describe_schema_versions_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_multi_slice_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_multi_slice_argsTupleSchemeFactory());
     }
 
+    public MultiSliceRequest request; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-;
+      REQUEST((short)1, "request");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -30510,6 +30682,8 @@ public class Cassandra {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
+          case 1: // REQUEST
+            return REQUEST;
           default:
             return null;
         }
@@ -30548,37 +30722,87 @@ public class Cassandra {
         return _fieldName;
       }
     }
+
+    // isset id assignments
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MultiSliceRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(describe_schema_versions_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_multi_slice_args.class, metaDataMap);
     }
 
-    public describe_schema_versions_args() {
+    public get_multi_slice_args() {
+    }
+
+    public get_multi_slice_args(
+      MultiSliceRequest request)
+    {
+      this();
+      this.request = request;
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public describe_schema_versions_args(describe_schema_versions_args other) {
+    public get_multi_slice_args(get_multi_slice_args other) {
+      if (other.isSetRequest()) {
+        this.request = new MultiSliceRequest(other.request);
+      }
     }
 
-    public describe_schema_versions_args deepCopy() {
-      return new describe_schema_versions_args(this);
+    public get_multi_slice_args deepCop

<TRUNCATED>

[5/6] git commit: Handle overlapping multislices in thrift and cql

Posted by ja...@apache.org.
Handle overlapping multislices in thrift and cql

Patch by pcmanus and tjake; reviewed by tjake for CASSANDRA-7279


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e9e91d7b
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e9e91d7b
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e9e91d7b

Branch: refs/heads/trunk
Commit: e9e91d7b505d5c76cf099b7792a1c884276b56c0
Parents: 7f63b1f
Author: Jake Luciani <ja...@apache.org>
Authored: Wed May 28 13:14:43 2014 -0400
Committer: Jake Luciani <ja...@apache.org>
Committed: Wed May 28 13:14:43 2014 -0400

----------------------------------------------------------------------
 CHANGES.txt                                     |   1 +
 .../cql3/statements/CQL3CasConditions.java      |   1 +
 .../cql3/statements/SelectStatement.java        |   1 +
 .../cassandra/db/composites/AbstractCType.java  |   5 +
 .../apache/cassandra/db/filter/ColumnSlice.java |  97 +++++++++++++
 .../cassandra/thrift/CassandraServer.java       |   6 +-
 .../cassandra/db/filter/ColumnSliceTest.java    | 137 +++++++++++++++++--
 .../apache/cassandra/thrift/MultiSliceTest.java |  20 ++-
 8 files changed, 252 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
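
For orientation, a minimal client-side sketch of the behaviour this patch changes (not part of the commit; the row key, column family, the open Cassandra.Client named "client", and the usual org.apache.cassandra.thrift / ByteBufferUtil imports are illustrative assumptions). Before this change, a request whose column_slices overlapped could return incorrect results (see the MultiSliceTest expectation fixed below); the server now merges the slices into non-overlapping ranges before reading:

    // Two overlapping slices over the same row: "a".."e" and "d".."g".
    // After this patch CassandraServer deoverlaps them into a single "a".."g" range,
    // so each column is returned at most once and count applies to the merged range.
    ColumnSlice first = new ColumnSlice();            // org.apache.cassandra.thrift.ColumnSlice
    first.setStart(ByteBufferUtil.bytes("a"));
    first.setFinish(ByteBufferUtil.bytes("e"));
    ColumnSlice second = new ColumnSlice();
    second.setStart(ByteBufferUtil.bytes("d"));
    second.setFinish(ByteBufferUtil.bytes("g"));

    MultiSliceRequest req = new MultiSliceRequest();
    req.setKey(ByteBufferUtil.bytes("some-row-key"));          // illustrative key
    req.setColumn_parent(new ColumnParent("Standard1"));       // column family used by the tests
    req.setColumn_slices(Arrays.asList(first, second));
    req.setCount(100);
    req.setReversed(false);
    req.setConsistency_level(ConsistencyLevel.ONE);

    List<ColumnOrSuperColumn> columns = client.get_multi_slice(req);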


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index a7cc872..8e0dead 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -26,6 +26,7 @@
  * Fix IllegalArgumentException in CqlStorage (CASSANDRA-7287)
  * Allow nulls/non-existant fields in UDT (CASSANDRA-7206)
  * Backport Thrift MultiSliceRequest (CASSANDRA-7027)
+ * Handle overlapping MultiSlices (CASSANDRA-7279)
 Merged from 2.0:
  * Copy compaction options to make sure they are reloaded (CASSANDRA-7290)
  * Add option to do more aggressive tombstone compactions (CASSANDRA-6563)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/src/java/org/apache/cassandra/cql3/statements/CQL3CasConditions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CQL3CasConditions.java b/src/java/org/apache/cassandra/cql3/statements/CQL3CasConditions.java
index b06b2ee..8b5a403 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CQL3CasConditions.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CQL3CasConditions.java
@@ -98,6 +98,7 @@ public class CQL3CasConditions implements CASConditions
             slices[i++] = prefix.slice();
 
         int toGroup = cfm.comparator.isDense() ? -1 : cfm.clusteringColumns().size();
+        assert ColumnSlice.validateSlices(slices, cfm.comparator, false);
         return new SliceQueryFilter(slices, false, slices.length, toGroup);
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
index 501ef45..d484c5f 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
@@ -533,6 +533,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
 
     private SliceQueryFilter sliceFilter(ColumnSlice[] slices, int limit, int toGroup)
     {
+        assert ColumnSlice.validateSlices(slices, cfm.comparator, isReversed) : String.format("Invalid slices: " + Arrays.toString(slices) + (isReversed ? " (reversed)" : ""));
         return new SliceQueryFilter(slices, isReversed, limit, toGroup);
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/src/java/org/apache/cassandra/db/composites/AbstractCType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/AbstractCType.java b/src/java/org/apache/cassandra/db/composites/AbstractCType.java
index 0e73397..e299e42 100644
--- a/src/java/org/apache/cassandra/db/composites/AbstractCType.java
+++ b/src/java/org/apache/cassandra/db/composites/AbstractCType.java
@@ -60,6 +60,11 @@ public abstract class AbstractCType implements CType
         {
             public int compare(Composite c1, Composite c2)
             {
+                if (c1.isEmpty())
+                    return c2.isEmpty() ? 0 : -1;
+                if (c2.isEmpty())
+                    return 1;
+
                 return AbstractCType.this.compare(c2, c1);
             }
         };
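
A minimal illustration of the branch added above (not part of the commit; "type" stands for any concrete CType and "nonEmpty" for any non-empty Composite built from it). The reversed comparator now handles empty composites explicitly instead of delegating them to compare(), so that the slice validation and deoverlap code below can compare open-ended slice bounds under the reversed order:

    Comparator<Composite> rev = type.reverseComparator();
    assert rev.compare(Composites.EMPTY, Composites.EMPTY) == 0;  // empty vs empty: equal
    assert rev.compare(Composites.EMPTY, nonEmpty) < 0;           // empty sorts before any non-empty composite
    assert rev.compare(nonEmpty, Composites.EMPTY) > 0;           // and non-empty sorts after empty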

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/ColumnSlice.java b/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
index a945114..bca4743 100644
--- a/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
+++ b/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
@@ -19,6 +19,8 @@ package org.apache.cassandra.db.filter;
 
 import java.io.*;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
 
@@ -108,6 +110,101 @@ public class ColumnSlice
         return 0;
     }
 
+    /**
+     * Validates that the provided slice array contains only non-overlapping slices that are valid for a query,
+     * {@code reversed} or not, on a table using {@code comparator}.
+     */
+    public static boolean validateSlices(ColumnSlice[] slices, CellNameType comparator, boolean reversed)
+    {
+        return validateSlices(slices, reversed ? comparator.reverseComparator() : comparator);
+    }
+
+    /**
+     * Validates that the provided slice array contains only non-overlapping slices in {@code comparator} order.
+     */
+    public static boolean validateSlices(ColumnSlice[] slices, Comparator<Composite> comparator)
+    {
+        for (int i = 0; i < slices.length; i++)
+        {
+            if (i > 0 && comparator.compare(slices[i-1].finish, slices[i].start) >= 0)
+                return false;
+
+            if (slices[i].finish.isEmpty())
+                return i == slices.length - 1;
+
+            if (comparator.compare(slices[i].start, slices[i].finish) > 0)
+                return false;
+        }
+        return true;
+    }
+
+    /**
+     * Takes an array of slices (potentially overlapping and in any order, though each individual slice must have
+     * its start before or equal to its end in {@code comparator} order) and returns an equivalent array of
+     * non-overlapping slices in {@code comparator} order.
+     *
+     * @param slices an array of slices. This may be modified by this method.
+     * @param comparator the order in which to sort the slices.
+     * @return the smallest possible array of non-overlapping slices in {@code comparator} order. If the original
+     * slices are already non-overlapping and in comparator order, this may or may not return the provided slices
+     * directly.
+     */
+    public static ColumnSlice[] deoverlapSlices(ColumnSlice[] slices, final Comparator<Composite> comparator)
+    {
+        if (slices.length <= 1)
+            return slices;
+
+        Arrays.sort(slices, new Comparator<ColumnSlice>()
+        {
+            @Override
+            public int compare(ColumnSlice s1, ColumnSlice s2)
+            {
+                int c = comparator.compare(s1.start, s2.start);
+                if (c != 0)
+                    return c;
+
+                // For the finish, empty always means greater
+                return s1.finish.isEmpty() || s2.finish.isEmpty()
+                     ? s1.finish.isEmpty() ? 1 : s2.finish.isEmpty() ? -1 : 0
+                     : comparator.compare(s1.finish, s2.finish);
+            }
+        });
+
+        List<ColumnSlice> slicesCopy = new ArrayList<>(slices.length);
+
+        ColumnSlice last = slices[0];
+
+        for (int i = 1; i < slices.length; i++)
+        {
+            ColumnSlice s2 = slices[i];
+
+            boolean includesStart = last.includes(comparator, s2.start);
+            boolean includesFinish = s2.finish.isEmpty() ? last.finish.isEmpty() : last.includes(comparator, s2.finish);
+
+            if (includesStart && includesFinish)
+                continue;
+
+            if (!includesStart && !includesFinish)
+            {
+                slicesCopy.add(last);
+                last = s2;
+                continue;
+            }
+
+            if (includesStart)
+            {
+                last = new ColumnSlice(last.start, s2.finish);
+                continue;
+            }
+
+            assert !includesFinish;
+        }
+
+        slicesCopy.add(last);
+
+        return slicesCopy.toArray(new ColumnSlice[slicesCopy.size()]);
+    }
+
     @Override
     public final int hashCode()
     {
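
A short usage sketch of the two new helpers in this file, mirroring what ColumnSliceTest below exercises (the Int32 cell name type and the slice bounds are the same ones the test uses; this is illustrative, not part of the patch):

    CellNameType type = new SimpleDenseCellNameType(Int32Type.instance);

    ColumnSlice[] slices = new ColumnSlice[]{
        new ColumnSlice(type.make(0), type.make(3)),
        new ColumnSlice(type.make(2), type.make(5)),   // overlaps the previous slice
        new ColumnSlice(type.make(8), type.make(9))
    };

    // Overlapping input fails validation...
    assert !ColumnSlice.validateSlices(slices, type, false);

    // ...and deoverlapSlices merges it into the equivalent non-overlapping form (0,5), (8,9),
    // which then validates.
    ColumnSlice[] merged = ColumnSlice.deoverlapSlices(slices, type);
    assert ColumnSlice.validateSlices(merged, type, false);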

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java
index 917e4cb..1a77ffa 100644
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@ -2065,7 +2065,7 @@ public class CassandraServer implements Cassandra.Iface
             org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(request.getConsistency_level());
             consistencyLevel.validateForRead(keyspace);
             List<ReadCommand> commands = new ArrayList<>(1);
-            ColumnSlice [] slices = new ColumnSlice[request.getColumn_slices().size()];
+            ColumnSlice[] slices = new ColumnSlice[request.getColumn_slices().size()];
             for (int i = 0 ; i < request.getColumn_slices().size() ; i++)
             {
                 fixOptionalSliceParameters(request.getColumn_slices().get(i));
@@ -2078,7 +2078,9 @@ public class CassandraServer implements Cassandra.Iface
                     throw new InvalidRequestException(String.format("Reversed column slice at index %d had start less than finish", i));
                 slices[i] = new ColumnSlice(start, finish);
             }
-            SliceQueryFilter filter = new SliceQueryFilter(slices, request.reversed, request.count);
+
+            ColumnSlice[] deoverlapped = ColumnSlice.deoverlapSlices(slices, request.reversed ? metadata.comparator.reverseComparator() : metadata.comparator);
+            SliceQueryFilter filter = new SliceQueryFilter(deoverlapped, request.reversed, request.count);
             ThriftValidation.validateKey(metadata, request.key);
             commands.add(ReadCommand.create(keyspace, request.key, request.column_parent.getColumn_family(), System.currentTimeMillis(), filter));
             return getSlice(commands, request.column_parent.isSetSuper_column(), consistencyLevel).entrySet().iterator().next().getValue();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/test/unit/org/apache/cassandra/db/filter/ColumnSliceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/filter/ColumnSliceTest.java b/test/unit/org/apache/cassandra/db/filter/ColumnSliceTest.java
index 6553de5..2dc3744 100644
--- a/test/unit/org/apache/cassandra/db/filter/ColumnSliceTest.java
+++ b/test/unit/org/apache/cassandra/db/filter/ColumnSliceTest.java
@@ -18,22 +18,23 @@
  * */
 package org.apache.cassandra.db.filter;
 
-import org.apache.cassandra.db.composites.Composite;
-import org.apache.cassandra.db.composites.CompoundDenseCellNameType;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+
+import org.junit.Test;
+
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.junit.Test;
 
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 public class ColumnSliceTest
 {
+    private static final CellNameType simpleIntType = new SimpleDenseCellNameType(Int32Type.instance);
+
     @Test
     public void testIntersectsSingleSlice()
     {
@@ -278,6 +279,65 @@ public class ColumnSliceTest
         assertTrue(slice.intersects(columnNames(1, 0, 0), columnNames(2, 2, 2), nameType, true));
     }
 
+    @Test
+    public void testDeoverlapSlices()
+    {
+        ColumnSlice[] slices;
+        ColumnSlice[] deoverlapped;
+
+        // Preserve correct slices
+        slices = slices(s(0, 3), s(4, 5), s(6, 9));
+        assertSlicesValid(slices);
+        assertSlicesEquals(slices, deoverlapSlices(slices));
+
+        // Simple overlap
+        slices = slices(s(0, 3), s(2, 5), s(8, 9));
+        assertSlicesInvalid(slices);
+        assertSlicesEquals(slices(s(0, 5), s(8, 9)), deoverlapSlices(slices));
+
+        // Slice overlaps others fully
+        slices = slices(s(0, 10), s(2, 5), s(8, 9));
+        assertSlicesInvalid(slices);
+        assertSlicesEquals(slices(s(0, 10)), deoverlapSlices(slices));
+
+        // Slice with empty end overlaps others fully
+        slices = slices(s(0, -1), s(2, 5), s(8, 9));
+        assertSlicesInvalid(slices);
+        assertSlicesEquals(slices(s(0, -1)), deoverlapSlices(slices));
+
+        // Overlap with slices selecting only one element
+        slices = slices(s(0, 4), s(4, 4), s(4, 8));
+        assertSlicesInvalid(slices);
+        assertSlicesEquals(slices(s(0, 8)), deoverlapSlices(slices));
+
+        // Unordered slices (without overlap)
+        slices = slices(s(4, 8), s(0, 3), s(9, 9));
+        assertSlicesInvalid(slices);
+        assertSlicesEquals(slices(s(0, 3), s(4, 8), s(9, 9)), deoverlapSlices(slices));
+
+        // All range select but not by a single slice
+        slices = slices(s(5, -1), s(2, 5), s(-1, 2));
+        assertSlicesInvalid(slices);
+        assertSlicesEquals(slices(s(-1, -1)), deoverlapSlices(slices));
+    }
+
+    @Test
+    public void testValidateSlices()
+    {
+        assertSlicesValid(slices(s(0, 3)));
+        assertSlicesValid(slices(s(3, 3)));
+        assertSlicesValid(slices(s(3, 3), s(4, 4)));
+        assertSlicesValid(slices(s(0, 3), s(4, 5), s(6, 9)));
+        assertSlicesValid(slices(s(-1, -1)));
+        assertSlicesValid(slices(s(-1, 3), s(4, -1)));
+
+        assertSlicesInvalid(slices(s(3, 0)));
+        assertSlicesInvalid(slices(s(0, 2), s(2, 4)));
+        assertSlicesInvalid(slices(s(0, 2), s(1, 4)));
+        assertSlicesInvalid(slices(s(0, 2), s(3, 4), s(3, 4)));
+        assertSlicesInvalid(slices(s(-1, 2), s(3, -1), s(5, 9)));
+    }
+
     private static Composite composite(Integer ... components)
     {
         List<AbstractType<?>> types = new ArrayList<>();
@@ -295,4 +355,61 @@ public class ColumnSliceTest
             names.add(ByteBufferUtil.bytes(component));
         return names;
     }
-}
\ No newline at end of file
+
+    private static Composite simpleComposite(int i)
+    {
+        // We use negative values to mean EMPTY for convenience's sake
+        if (i < 0)
+            return Composites.EMPTY;
+
+        return simpleIntType.make(i);
+    }
+
+    private static ColumnSlice s(int start, int finish)
+    {
+        return new ColumnSlice(simpleComposite(start), simpleComposite(finish));
+    }
+
+    private static ColumnSlice[] slices(ColumnSlice... slices)
+    {
+        return slices;
+    }
+
+    private static ColumnSlice[] deoverlapSlices(ColumnSlice[] slices)
+    {
+        return ColumnSlice.deoverlapSlices(slices, simpleIntType);
+    }
+
+    private static void assertSlicesValid(ColumnSlice[] slices)
+    {
+        assertTrue("Slices " + toString(slices) + " should be valid", ColumnSlice.validateSlices(slices, simpleIntType));
+    }
+
+    private static void assertSlicesInvalid(ColumnSlice[] slices)
+    {
+        assertFalse("Slices " + toString(slices) + " shouldn't be valid", ColumnSlice.validateSlices(slices, simpleIntType));
+    }
+
+    private static void assertSlicesEquals(ColumnSlice[] expected, ColumnSlice[] actual)
+    {
+        assertTrue("Expected " + toString(expected) + " but got " + toString(actual), Arrays.equals(expected, actual));
+    }
+
+    private static String toString(ColumnSlice[] slices)
+    {
+        StringBuilder sb = new StringBuilder().append("[");
+        for (int i = 0; i < slices.length; i++)
+        {
+            if (i > 0)
+                sb.append(", ");
+
+            ColumnSlice slice = slices[i];
+            sb.append("(");
+            sb.append(slice.start.isEmpty() ? "-1" : simpleIntType.getString(slice.start));
+            sb.append(", ");
+            sb.append(slice.finish.isEmpty() ? "-1" : simpleIntType.getString(slice.finish));
+            sb.append(")");
+        }
+        return sb.append("]").toString();
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9e91d7b/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java b/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java
index d1c913b..9193258 100644
--- a/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java
+++ b/test/unit/org/apache/cassandra/thrift/MultiSliceTest.java
@@ -114,9 +114,21 @@ public class MultiSliceTest extends SchemaLoader
         req.setCount(6);
         req.reversed = true;
         req.setColumn_slices(Arrays.asList(columnSliceFrom("e", "a"), columnSliceFrom("g", "d")));
-        assertColumnNameMatches(Arrays.asList("g", "e", "d", "c", "b", "a"), server.get_multi_slice(req)); 
+        assertColumnNameMatches(Arrays.asList("g", "f", "e", "d", "c", "b"), server.get_multi_slice(req));
     }
-    
+
+    @Test
+    public void test_with_overlap_with_count() throws TException
+    {
+        ColumnParent cp = new ColumnParent("Standard1");
+        ByteBuffer key = ByteBuffer.wrap("overlap_reversed_count".getBytes());
+        addTheAlphabetToRow(key, cp);
+        MultiSliceRequest req = makeMultiSliceRequest(key);
+        req.setCount(6);
+        req.setColumn_slices(Arrays.asList(columnSliceFrom("a", "e"), columnSliceFrom("d", "g"), columnSliceFrom("d", "g")));
+        assertColumnNameMatches(Arrays.asList("a", "b", "c", "d", "e", "f"), server.get_multi_slice(req));
+    }
+
     private static void addTheAlphabetToRow(ByteBuffer key, ColumnParent parent) 
             throws InvalidRequestException, UnavailableException, TimedOutException
     {
@@ -135,7 +147,7 @@ public class MultiSliceTest extends SchemaLoader
         for (int i = 0 ; i< expected.size() ; i++)
         {
             Assert.assertEquals(actual.get(i) +" did not equal "+ expected.get(i), 
-                    new String(actual.get(i).getColumn().getName()), expected.get(i));
+                    expected.get(i), new String(actual.get(i).getColumn().getName()));
         }
     }
     
@@ -146,4 +158,4 @@ public class MultiSliceTest extends SchemaLoader
         cs.setFinish(ByteBufferUtil.bytes(endInclusive));
         return cs;
     }
-}
\ No newline at end of file
+}


[2/6] Backport MultiSliceRequest

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java
----------------------------------------------------------------------
diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java
index 4f6912e..ec10050 100644
--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java
+++ b/interface/thrift/gen-java/org/apache/cassandra/thrift/CfDef.java
@@ -78,7 +78,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
   private static final org.apache.thrift.protocol.TField BLOOM_FILTER_FP_CHANCE_FIELD_DESC = new org.apache.thrift.protocol.TField("bloom_filter_fp_chance", org.apache.thrift.protocol.TType.DOUBLE, (short)33);
   private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC = new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.STRING, (short)34);
   private static final org.apache.thrift.protocol.TField DCLOCAL_READ_REPAIR_CHANCE_FIELD_DESC = new org.apache.thrift.protocol.TField("dclocal_read_repair_chance", org.apache.thrift.protocol.TType.DOUBLE, (short)37);
-  private static final org.apache.thrift.protocol.TField POPULATE_IO_CACHE_ON_FLUSH_FIELD_DESC = new org.apache.thrift.protocol.TField("populate_io_cache_on_flush", org.apache.thrift.protocol.TType.BOOL, (short)38);
   private static final org.apache.thrift.protocol.TField MEMTABLE_FLUSH_PERIOD_IN_MS_FIELD_DESC = new org.apache.thrift.protocol.TField("memtable_flush_period_in_ms", org.apache.thrift.protocol.TType.I32, (short)39);
   private static final org.apache.thrift.protocol.TField DEFAULT_TIME_TO_LIVE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_time_to_live", org.apache.thrift.protocol.TType.I32, (short)40);
   private static final org.apache.thrift.protocol.TField SPECULATIVE_RETRY_FIELD_DESC = new org.apache.thrift.protocol.TField("speculative_retry", org.apache.thrift.protocol.TType.STRING, (short)42);
@@ -97,6 +96,7 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
   private static final org.apache.thrift.protocol.TField MERGE_SHARDS_CHANCE_FIELD_DESC = new org.apache.thrift.protocol.TField("merge_shards_chance", org.apache.thrift.protocol.TType.DOUBLE, (short)25);
   private static final org.apache.thrift.protocol.TField ROW_CACHE_PROVIDER_FIELD_DESC = new org.apache.thrift.protocol.TField("row_cache_provider", org.apache.thrift.protocol.TType.STRING, (short)27);
   private static final org.apache.thrift.protocol.TField ROW_CACHE_KEYS_TO_SAVE_FIELD_DESC = new org.apache.thrift.protocol.TField("row_cache_keys_to_save", org.apache.thrift.protocol.TType.I32, (short)31);
+  private static final org.apache.thrift.protocol.TField POPULATE_IO_CACHE_ON_FLUSH_FIELD_DESC = new org.apache.thrift.protocol.TField("populate_io_cache_on_flush", org.apache.thrift.protocol.TType.BOOL, (short)38);
   private static final org.apache.thrift.protocol.TField INDEX_INTERVAL_FIELD_DESC = new org.apache.thrift.protocol.TField("index_interval", org.apache.thrift.protocol.TType.I32, (short)41);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -126,7 +126,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
   public double bloom_filter_fp_chance; // optional
   public String caching; // optional
   public double dclocal_read_repair_chance; // optional
-  public boolean populate_io_cache_on_flush; // optional
   public int memtable_flush_period_in_ms; // optional
   public int default_time_to_live; // optional
   public String speculative_retry; // optional
@@ -181,6 +180,10 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
   /**
    * @deprecated
    */
+  public boolean populate_io_cache_on_flush; // optional
+  /**
+   * @deprecated
+   */
   public int index_interval; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -206,7 +209,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     BLOOM_FILTER_FP_CHANCE((short)33, "bloom_filter_fp_chance"),
     CACHING((short)34, "caching"),
     DCLOCAL_READ_REPAIR_CHANCE((short)37, "dclocal_read_repair_chance"),
-    POPULATE_IO_CACHE_ON_FLUSH((short)38, "populate_io_cache_on_flush"),
     MEMTABLE_FLUSH_PERIOD_IN_MS((short)39, "memtable_flush_period_in_ms"),
     DEFAULT_TIME_TO_LIVE((short)40, "default_time_to_live"),
     SPECULATIVE_RETRY((short)42, "speculative_retry"),
@@ -261,6 +263,10 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     /**
      * @deprecated
      */
+    POPULATE_IO_CACHE_ON_FLUSH((short)38, "populate_io_cache_on_flush"),
+    /**
+     * @deprecated
+     */
     INDEX_INTERVAL((short)41, "index_interval");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -318,8 +324,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
           return CACHING;
         case 37: // DCLOCAL_READ_REPAIR_CHANCE
           return DCLOCAL_READ_REPAIR_CHANCE;
-        case 38: // POPULATE_IO_CACHE_ON_FLUSH
-          return POPULATE_IO_CACHE_ON_FLUSH;
         case 39: // MEMTABLE_FLUSH_PERIOD_IN_MS
           return MEMTABLE_FLUSH_PERIOD_IN_MS;
         case 40: // DEFAULT_TIME_TO_LIVE
@@ -356,6 +360,8 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
           return ROW_CACHE_PROVIDER;
         case 31: // ROW_CACHE_KEYS_TO_SAVE
           return ROW_CACHE_KEYS_TO_SAVE;
+        case 38: // POPULATE_IO_CACHE_ON_FLUSH
+          return POPULATE_IO_CACHE_ON_FLUSH;
         case 41: // INDEX_INTERVAL
           return INDEX_INTERVAL;
         default:
@@ -405,24 +411,24 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
   private static final int __MAX_COMPACTION_THRESHOLD_ISSET_ID = 4;
   private static final int __BLOOM_FILTER_FP_CHANCE_ISSET_ID = 5;
   private static final int __DCLOCAL_READ_REPAIR_CHANCE_ISSET_ID = 6;
-  private static final int __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID = 7;
-  private static final int __MEMTABLE_FLUSH_PERIOD_IN_MS_ISSET_ID = 8;
-  private static final int __DEFAULT_TIME_TO_LIVE_ISSET_ID = 9;
-  private static final int __MIN_INDEX_INTERVAL_ISSET_ID = 10;
-  private static final int __MAX_INDEX_INTERVAL_ISSET_ID = 11;
-  private static final int __ROW_CACHE_SIZE_ISSET_ID = 12;
-  private static final int __KEY_CACHE_SIZE_ISSET_ID = 13;
-  private static final int __ROW_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID = 14;
-  private static final int __KEY_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID = 15;
-  private static final int __MEMTABLE_FLUSH_AFTER_MINS_ISSET_ID = 16;
-  private static final int __MEMTABLE_THROUGHPUT_IN_MB_ISSET_ID = 17;
-  private static final int __MEMTABLE_OPERATIONS_IN_MILLIONS_ISSET_ID = 18;
-  private static final int __REPLICATE_ON_WRITE_ISSET_ID = 19;
-  private static final int __MERGE_SHARDS_CHANCE_ISSET_ID = 20;
-  private static final int __ROW_CACHE_KEYS_TO_SAVE_ISSET_ID = 21;
+  private static final int __MEMTABLE_FLUSH_PERIOD_IN_MS_ISSET_ID = 7;
+  private static final int __DEFAULT_TIME_TO_LIVE_ISSET_ID = 8;
+  private static final int __MIN_INDEX_INTERVAL_ISSET_ID = 9;
+  private static final int __MAX_INDEX_INTERVAL_ISSET_ID = 10;
+  private static final int __ROW_CACHE_SIZE_ISSET_ID = 11;
+  private static final int __KEY_CACHE_SIZE_ISSET_ID = 12;
+  private static final int __ROW_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID = 13;
+  private static final int __KEY_CACHE_SAVE_PERIOD_IN_SECONDS_ISSET_ID = 14;
+  private static final int __MEMTABLE_FLUSH_AFTER_MINS_ISSET_ID = 15;
+  private static final int __MEMTABLE_THROUGHPUT_IN_MB_ISSET_ID = 16;
+  private static final int __MEMTABLE_OPERATIONS_IN_MILLIONS_ISSET_ID = 17;
+  private static final int __REPLICATE_ON_WRITE_ISSET_ID = 18;
+  private static final int __MERGE_SHARDS_CHANCE_ISSET_ID = 19;
+  private static final int __ROW_CACHE_KEYS_TO_SAVE_ISSET_ID = 20;
+  private static final int __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID = 21;
   private static final int __INDEX_INTERVAL_ISSET_ID = 22;
   private int __isset_bitfield = 0;
-  private _Fields optionals[] = {_Fields.COLUMN_TYPE,_Fields.COMPARATOR_TYPE,_Fields.SUBCOMPARATOR_TYPE,_Fields.COMMENT,_Fields.READ_REPAIR_CHANCE,_Fields.COLUMN_METADATA,_Fields.GC_GRACE_SECONDS,_Fields.DEFAULT_VALIDATION_CLASS,_Fields.ID,_Fields.MIN_COMPACTION_THRESHOLD,_Fields.MAX_COMPACTION_THRESHOLD,_Fields.KEY_VALIDATION_CLASS,_Fields.KEY_ALIAS,_Fields.COMPACTION_STRATEGY,_Fields.COMPACTION_STRATEGY_OPTIONS,_Fields.COMPRESSION_OPTIONS,_Fields.BLOOM_FILTER_FP_CHANCE,_Fields.CACHING,_Fields.DCLOCAL_READ_REPAIR_CHANCE,_Fields.POPULATE_IO_CACHE_ON_FLUSH,_Fields.MEMTABLE_FLUSH_PERIOD_IN_MS,_Fields.DEFAULT_TIME_TO_LIVE,_Fields.SPECULATIVE_RETRY,_Fields.TRIGGERS,_Fields.CELLS_PER_ROW_TO_CACHE,_Fields.MIN_INDEX_INTERVAL,_Fields.MAX_INDEX_INTERVAL,_Fields.ROW_CACHE_SIZE,_Fields.KEY_CACHE_SIZE,_Fields.ROW_CACHE_SAVE_PERIOD_IN_SECONDS,_Fields.KEY_CACHE_SAVE_PERIOD_IN_SECONDS,_Fields.MEMTABLE_FLUSH_AFTER_MINS,_Fields.MEMTABLE_THROUGHPUT_IN_MB,_Fields.MEMTABLE_OPERATIONS_IN_MILLIONS,_Fields.REPLICATE_ON_WRITE,_Fields.MERGE_SHARDS_CHANCE,_Fields.ROW_CACHE_PROVIDER,_Fields.ROW_CACHE_KEYS_TO_SAVE,_Fields.INDEX_INTERVAL};
+  private _Fields optionals[] = {_Fields.COLUMN_TYPE,_Fields.COMPARATOR_TYPE,_Fields.SUBCOMPARATOR_TYPE,_Fields.COMMENT,_Fields.READ_REPAIR_CHANCE,_Fields.COLUMN_METADATA,_Fields.GC_GRACE_SECONDS,_Fields.DEFAULT_VALIDATION_CLASS,_Fields.ID,_Fields.MIN_COMPACTION_THRESHOLD,_Fields.MAX_COMPACTION_THRESHOLD,_Fields.KEY_VALIDATION_CLASS,_Fields.KEY_ALIAS,_Fields.COMPACTION_STRATEGY,_Fields.COMPACTION_STRATEGY_OPTIONS,_Fields.COMPRESSION_OPTIONS,_Fields.BLOOM_FILTER_FP_CHANCE,_Fields.CACHING,_Fields.DCLOCAL_READ_REPAIR_CHANCE,_Fields.MEMTABLE_FLUSH_PERIOD_IN_MS,_Fields.DEFAULT_TIME_TO_LIVE,_Fields.SPECULATIVE_RETRY,_Fields.TRIGGERS,_Fields.CELLS_PER_ROW_TO_CACHE,_Fields.MIN_INDEX_INTERVAL,_Fields.MAX_INDEX_INTERVAL,_Fields.ROW_CACHE_SIZE,_Fields.KEY_CACHE_SIZE,_Fields.ROW_CACHE_SAVE_PERIOD_IN_SECONDS,_Fields.KEY_CACHE_SAVE_PERIOD_IN_SECONDS,_Fields.MEMTABLE_FLUSH_AFTER_MINS,_Fields.MEMTABLE_THROUGHPUT_IN_MB,_Fields.MEMTABLE_OPERATIONS_IN_MILLIONS,_Fields.REPLICATE_ON_WRITE,_Fields.MERGE_SHARDS_CHANCE,_Fields.ROW_CACHE_PROVIDER,_Fields.ROW_CACHE_KEYS_TO_SAVE,_Fields.POPULATE_IO_CACHE_ON_FLUSH,_Fields.INDEX_INTERVAL};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -473,8 +479,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.DCLOCAL_READ_REPAIR_CHANCE, new org.apache.thrift.meta_data.FieldMetaData("dclocal_read_repair_chance", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
-    tmpMap.put(_Fields.POPULATE_IO_CACHE_ON_FLUSH, new org.apache.thrift.meta_data.FieldMetaData("populate_io_cache_on_flush", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     tmpMap.put(_Fields.MEMTABLE_FLUSH_PERIOD_IN_MS, new org.apache.thrift.meta_data.FieldMetaData("memtable_flush_period_in_ms", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.DEFAULT_TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("default_time_to_live", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
@@ -512,6 +516,8 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.ROW_CACHE_KEYS_TO_SAVE, new org.apache.thrift.meta_data.FieldMetaData("row_cache_keys_to_save", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.POPULATE_IO_CACHE_ON_FLUSH, new org.apache.thrift.meta_data.FieldMetaData("populate_io_cache_on_flush", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     tmpMap.put(_Fields.INDEX_INTERVAL, new org.apache.thrift.meta_data.FieldMetaData("index_interval", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -603,7 +609,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       this.caching = other.caching;
     }
     this.dclocal_read_repair_chance = other.dclocal_read_repair_chance;
-    this.populate_io_cache_on_flush = other.populate_io_cache_on_flush;
     this.memtable_flush_period_in_ms = other.memtable_flush_period_in_ms;
     this.default_time_to_live = other.default_time_to_live;
     if (other.isSetSpeculative_retry()) {
@@ -634,6 +639,7 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       this.row_cache_provider = other.row_cache_provider;
     }
     this.row_cache_keys_to_save = other.row_cache_keys_to_save;
+    this.populate_io_cache_on_flush = other.populate_io_cache_on_flush;
     this.index_interval = other.index_interval;
   }
 
@@ -674,8 +680,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
 
     this.dclocal_read_repair_chance = 0;
 
-    setPopulate_io_cache_on_flushIsSet(false);
-    this.populate_io_cache_on_flush = false;
     setMemtable_flush_period_in_msIsSet(false);
     this.memtable_flush_period_in_ms = 0;
     setDefault_time_to_liveIsSet(false);
@@ -710,6 +714,8 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     this.row_cache_provider = null;
     setRow_cache_keys_to_saveIsSet(false);
     this.row_cache_keys_to_save = 0;
+    setPopulate_io_cache_on_flushIsSet(false);
+    this.populate_io_cache_on_flush = false;
     setIndex_intervalIsSet(false);
     this.index_interval = 0;
   }
@@ -1258,29 +1264,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DCLOCAL_READ_REPAIR_CHANCE_ISSET_ID, value);
   }
 
-  public boolean isPopulate_io_cache_on_flush() {
-    return this.populate_io_cache_on_flush;
-  }
-
-  public CfDef setPopulate_io_cache_on_flush(boolean populate_io_cache_on_flush) {
-    this.populate_io_cache_on_flush = populate_io_cache_on_flush;
-    setPopulate_io_cache_on_flushIsSet(true);
-    return this;
-  }
-
-  public void unsetPopulate_io_cache_on_flush() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID);
-  }
-
-  /** Returns true if field populate_io_cache_on_flush is set (has been assigned a value) and false otherwise */
-  public boolean isSetPopulate_io_cache_on_flush() {
-    return EncodingUtils.testBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID);
-  }
-
-  public void setPopulate_io_cache_on_flushIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID, value);
-  }
-
   public int getMemtable_flush_period_in_ms() {
     return this.memtable_flush_period_in_ms;
   }
@@ -1783,6 +1766,35 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
   /**
    * @deprecated
    */
+  public boolean isPopulate_io_cache_on_flush() {
+    return this.populate_io_cache_on_flush;
+  }
+
+  /**
+   * @deprecated
+   */
+  public CfDef setPopulate_io_cache_on_flush(boolean populate_io_cache_on_flush) {
+    this.populate_io_cache_on_flush = populate_io_cache_on_flush;
+    setPopulate_io_cache_on_flushIsSet(true);
+    return this;
+  }
+
+  public void unsetPopulate_io_cache_on_flush() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID);
+  }
+
+  /** Returns true if field populate_io_cache_on_flush is set (has been assigned a value) and false otherwise */
+  public boolean isSetPopulate_io_cache_on_flush() {
+    return EncodingUtils.testBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID);
+  }
+
+  public void setPopulate_io_cache_on_flushIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __POPULATE_IO_CACHE_ON_FLUSH_ISSET_ID, value);
+  }
+
+  /**
+   * @deprecated
+   */
   public int getIndex_interval() {
     return this.index_interval;
   }
@@ -1979,14 +1991,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       }
       break;
 
-    case POPULATE_IO_CACHE_ON_FLUSH:
-      if (value == null) {
-        unsetPopulate_io_cache_on_flush();
-      } else {
-        setPopulate_io_cache_on_flush((Boolean)value);
-      }
-      break;
-
     case MEMTABLE_FLUSH_PERIOD_IN_MS:
       if (value == null) {
         unsetMemtable_flush_period_in_ms();
@@ -2131,6 +2135,14 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       }
       break;
 
+    case POPULATE_IO_CACHE_ON_FLUSH:
+      if (value == null) {
+        unsetPopulate_io_cache_on_flush();
+      } else {
+        setPopulate_io_cache_on_flush((Boolean)value);
+      }
+      break;
+
     case INDEX_INTERVAL:
       if (value == null) {
         unsetIndex_interval();
@@ -2207,9 +2219,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     case DCLOCAL_READ_REPAIR_CHANCE:
       return Double.valueOf(getDclocal_read_repair_chance());
 
-    case POPULATE_IO_CACHE_ON_FLUSH:
-      return Boolean.valueOf(isPopulate_io_cache_on_flush());
-
     case MEMTABLE_FLUSH_PERIOD_IN_MS:
       return Integer.valueOf(getMemtable_flush_period_in_ms());
 
@@ -2264,6 +2273,9 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     case ROW_CACHE_KEYS_TO_SAVE:
       return Integer.valueOf(getRow_cache_keys_to_save());
 
+    case POPULATE_IO_CACHE_ON_FLUSH:
+      return Boolean.valueOf(isPopulate_io_cache_on_flush());
+
     case INDEX_INTERVAL:
       return Integer.valueOf(getIndex_interval());
 
@@ -2320,8 +2332,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       return isSetCaching();
     case DCLOCAL_READ_REPAIR_CHANCE:
       return isSetDclocal_read_repair_chance();
-    case POPULATE_IO_CACHE_ON_FLUSH:
-      return isSetPopulate_io_cache_on_flush();
     case MEMTABLE_FLUSH_PERIOD_IN_MS:
       return isSetMemtable_flush_period_in_ms();
     case DEFAULT_TIME_TO_LIVE:
@@ -2358,6 +2368,8 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       return isSetRow_cache_provider();
     case ROW_CACHE_KEYS_TO_SAVE:
       return isSetRow_cache_keys_to_save();
+    case POPULATE_IO_CACHE_ON_FLUSH:
+      return isSetPopulate_io_cache_on_flush();
     case INDEX_INTERVAL:
       return isSetIndex_interval();
     }
@@ -2566,15 +2578,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         return false;
     }
 
-    boolean this_present_populate_io_cache_on_flush = true && this.isSetPopulate_io_cache_on_flush();
-    boolean that_present_populate_io_cache_on_flush = true && that.isSetPopulate_io_cache_on_flush();
-    if (this_present_populate_io_cache_on_flush || that_present_populate_io_cache_on_flush) {
-      if (!(this_present_populate_io_cache_on_flush && that_present_populate_io_cache_on_flush))
-        return false;
-      if (this.populate_io_cache_on_flush != that.populate_io_cache_on_flush)
-        return false;
-    }
-
     boolean this_present_memtable_flush_period_in_ms = true && this.isSetMemtable_flush_period_in_ms();
     boolean that_present_memtable_flush_period_in_ms = true && that.isSetMemtable_flush_period_in_ms();
     if (this_present_memtable_flush_period_in_ms || that_present_memtable_flush_period_in_ms) {
@@ -2737,6 +2740,15 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         return false;
     }
 
+    boolean this_present_populate_io_cache_on_flush = true && this.isSetPopulate_io_cache_on_flush();
+    boolean that_present_populate_io_cache_on_flush = true && that.isSetPopulate_io_cache_on_flush();
+    if (this_present_populate_io_cache_on_flush || that_present_populate_io_cache_on_flush) {
+      if (!(this_present_populate_io_cache_on_flush && that_present_populate_io_cache_on_flush))
+        return false;
+      if (this.populate_io_cache_on_flush != that.populate_io_cache_on_flush)
+        return false;
+    }
+
     boolean this_present_index_interval = true && this.isSetIndex_interval();
     boolean that_present_index_interval = true && that.isSetIndex_interval();
     if (this_present_index_interval || that_present_index_interval) {
@@ -2858,11 +2870,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     if (present_dclocal_read_repair_chance)
       builder.append(dclocal_read_repair_chance);
 
-    boolean present_populate_io_cache_on_flush = true && (isSetPopulate_io_cache_on_flush());
-    builder.append(present_populate_io_cache_on_flush);
-    if (present_populate_io_cache_on_flush)
-      builder.append(populate_io_cache_on_flush);
-
     boolean present_memtable_flush_period_in_ms = true && (isSetMemtable_flush_period_in_ms());
     builder.append(present_memtable_flush_period_in_ms);
     if (present_memtable_flush_period_in_ms)
@@ -2953,6 +2960,11 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
     if (present_row_cache_keys_to_save)
       builder.append(row_cache_keys_to_save);
 
+    boolean present_populate_io_cache_on_flush = true && (isSetPopulate_io_cache_on_flush());
+    builder.append(present_populate_io_cache_on_flush);
+    if (present_populate_io_cache_on_flush)
+      builder.append(populate_io_cache_on_flush);
+
     boolean present_index_interval = true && (isSetIndex_interval());
     builder.append(present_index_interval);
     if (present_index_interval)
@@ -3179,16 +3191,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetPopulate_io_cache_on_flush()).compareTo(other.isSetPopulate_io_cache_on_flush());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetPopulate_io_cache_on_flush()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.populate_io_cache_on_flush, other.populate_io_cache_on_flush);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetMemtable_flush_period_in_ms()).compareTo(other.isSetMemtable_flush_period_in_ms());
     if (lastComparison != 0) {
       return lastComparison;
@@ -3369,6 +3371,16 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetPopulate_io_cache_on_flush()).compareTo(other.isSetPopulate_io_cache_on_flush());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPopulate_io_cache_on_flush()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.populate_io_cache_on_flush, other.populate_io_cache_on_flush);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     lastComparison = Boolean.valueOf(isSetIndex_interval()).compareTo(other.isSetIndex_interval());
     if (lastComparison != 0) {
       return lastComparison;
@@ -3576,12 +3588,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       sb.append(this.dclocal_read_repair_chance);
       first = false;
     }
-    if (isSetPopulate_io_cache_on_flush()) {
-      if (!first) sb.append(", ");
-      sb.append("populate_io_cache_on_flush:");
-      sb.append(this.populate_io_cache_on_flush);
-      first = false;
-    }
     if (isSetMemtable_flush_period_in_ms()) {
       if (!first) sb.append(", ");
       sb.append("memtable_flush_period_in_ms:");
@@ -3706,6 +3712,12 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       sb.append(this.row_cache_keys_to_save);
       first = false;
     }
+    if (isSetPopulate_io_cache_on_flush()) {
+      if (!first) sb.append(", ");
+      sb.append("populate_io_cache_on_flush:");
+      sb.append(this.populate_io_cache_on_flush);
+      first = false;
+    }
     if (isSetIndex_interval()) {
       if (!first) sb.append(", ");
       sb.append("index_interval:");
@@ -3966,14 +3978,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 38: // POPULATE_IO_CACHE_ON_FLUSH
-            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
-              struct.populate_io_cache_on_flush = iprot.readBool();
-              struct.setPopulate_io_cache_on_flushIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
           case 39: // MEMTABLE_FLUSH_PERIOD_IN_MS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.memtable_flush_period_in_ms = iprot.readI32();
@@ -4129,6 +4133,14 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 38: // POPULATE_IO_CACHE_ON_FLUSH
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.populate_io_cache_on_flush = iprot.readBool();
+              struct.setPopulate_io_cache_on_flushIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           case 41: // INDEX_INTERVAL
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.index_interval = iprot.readI32();
@@ -4496,61 +4508,61 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       if (struct.isSetDclocal_read_repair_chance()) {
         optionals.set(18);
       }
-      if (struct.isSetPopulate_io_cache_on_flush()) {
+      if (struct.isSetMemtable_flush_period_in_ms()) {
         optionals.set(19);
       }
-      if (struct.isSetMemtable_flush_period_in_ms()) {
+      if (struct.isSetDefault_time_to_live()) {
         optionals.set(20);
       }
-      if (struct.isSetDefault_time_to_live()) {
+      if (struct.isSetSpeculative_retry()) {
         optionals.set(21);
       }
-      if (struct.isSetSpeculative_retry()) {
+      if (struct.isSetTriggers()) {
         optionals.set(22);
       }
-      if (struct.isSetTriggers()) {
+      if (struct.isSetCells_per_row_to_cache()) {
         optionals.set(23);
       }
-      if (struct.isSetCells_per_row_to_cache()) {
+      if (struct.isSetMin_index_interval()) {
         optionals.set(24);
       }
-      if (struct.isSetMin_index_interval()) {
+      if (struct.isSetMax_index_interval()) {
         optionals.set(25);
       }
-      if (struct.isSetMax_index_interval()) {
+      if (struct.isSetRow_cache_size()) {
         optionals.set(26);
       }
-      if (struct.isSetRow_cache_size()) {
+      if (struct.isSetKey_cache_size()) {
         optionals.set(27);
       }
-      if (struct.isSetKey_cache_size()) {
+      if (struct.isSetRow_cache_save_period_in_seconds()) {
         optionals.set(28);
       }
-      if (struct.isSetRow_cache_save_period_in_seconds()) {
+      if (struct.isSetKey_cache_save_period_in_seconds()) {
         optionals.set(29);
       }
-      if (struct.isSetKey_cache_save_period_in_seconds()) {
+      if (struct.isSetMemtable_flush_after_mins()) {
         optionals.set(30);
       }
-      if (struct.isSetMemtable_flush_after_mins()) {
+      if (struct.isSetMemtable_throughput_in_mb()) {
         optionals.set(31);
       }
-      if (struct.isSetMemtable_throughput_in_mb()) {
+      if (struct.isSetMemtable_operations_in_millions()) {
         optionals.set(32);
       }
-      if (struct.isSetMemtable_operations_in_millions()) {
+      if (struct.isSetReplicate_on_write()) {
         optionals.set(33);
       }
-      if (struct.isSetReplicate_on_write()) {
+      if (struct.isSetMerge_shards_chance()) {
         optionals.set(34);
       }
-      if (struct.isSetMerge_shards_chance()) {
+      if (struct.isSetRow_cache_provider()) {
         optionals.set(35);
       }
-      if (struct.isSetRow_cache_provider()) {
+      if (struct.isSetRow_cache_keys_to_save()) {
         optionals.set(36);
       }
-      if (struct.isSetRow_cache_keys_to_save()) {
+      if (struct.isSetPopulate_io_cache_on_flush()) {
         optionals.set(37);
       }
       if (struct.isSetIndex_interval()) {
@@ -4634,9 +4646,6 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       if (struct.isSetDclocal_read_repair_chance()) {
         oprot.writeDouble(struct.dclocal_read_repair_chance);
       }
-      if (struct.isSetPopulate_io_cache_on_flush()) {
-        oprot.writeBool(struct.populate_io_cache_on_flush);
-      }
       if (struct.isSetMemtable_flush_period_in_ms()) {
         oprot.writeI32(struct.memtable_flush_period_in_ms);
       }
@@ -4697,6 +4706,9 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
       if (struct.isSetRow_cache_keys_to_save()) {
         oprot.writeI32(struct.row_cache_keys_to_save);
       }
+      if (struct.isSetPopulate_io_cache_on_flush()) {
+        oprot.writeBool(struct.populate_io_cache_on_flush);
+      }
       if (struct.isSetIndex_interval()) {
         oprot.writeI32(struct.index_interval);
       }
@@ -4819,22 +4831,18 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         struct.setDclocal_read_repair_chanceIsSet(true);
       }
       if (incoming.get(19)) {
-        struct.populate_io_cache_on_flush = iprot.readBool();
-        struct.setPopulate_io_cache_on_flushIsSet(true);
-      }
-      if (incoming.get(20)) {
         struct.memtable_flush_period_in_ms = iprot.readI32();
         struct.setMemtable_flush_period_in_msIsSet(true);
       }
-      if (incoming.get(21)) {
+      if (incoming.get(20)) {
         struct.default_time_to_live = iprot.readI32();
         struct.setDefault_time_to_liveIsSet(true);
       }
-      if (incoming.get(22)) {
+      if (incoming.get(21)) {
         struct.speculative_retry = iprot.readString();
         struct.setSpeculative_retryIsSet(true);
       }
-      if (incoming.get(23)) {
+      if (incoming.get(22)) {
         {
           org.apache.thrift.protocol.TList _list143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
           struct.triggers = new ArrayList<TriggerDef>(_list143.size);
@@ -4848,62 +4856,66 @@ public class CfDef implements org.apache.thrift.TBase<CfDef, CfDef._Fields>, jav
         }
         struct.setTriggersIsSet(true);
       }
-      if (incoming.get(24)) {
+      if (incoming.get(23)) {
         struct.cells_per_row_to_cache = iprot.readString();
         struct.setCells_per_row_to_cacheIsSet(true);
       }
-      if (incoming.get(25)) {
+      if (incoming.get(24)) {
         struct.min_index_interval = iprot.readI32();
         struct.setMin_index_intervalIsSet(true);
       }
-      if (incoming.get(26)) {
+      if (incoming.get(25)) {
         struct.max_index_interval = iprot.readI32();
         struct.setMax_index_intervalIsSet(true);
       }
-      if (incoming.get(27)) {
+      if (incoming.get(26)) {
         struct.row_cache_size = iprot.readDouble();
         struct.setRow_cache_sizeIsSet(true);
       }
-      if (incoming.get(28)) {
+      if (incoming.get(27)) {
         struct.key_cache_size = iprot.readDouble();
         struct.setKey_cache_sizeIsSet(true);
       }
-      if (incoming.get(29)) {
+      if (incoming.get(28)) {
         struct.row_cache_save_period_in_seconds = iprot.readI32();
         struct.setRow_cache_save_period_in_secondsIsSet(true);
       }
-      if (incoming.get(30)) {
+      if (incoming.get(29)) {
         struct.key_cache_save_period_in_seconds = iprot.readI32();
         struct.setKey_cache_save_period_in_secondsIsSet(true);
       }
-      if (incoming.get(31)) {
+      if (incoming.get(30)) {
         struct.memtable_flush_after_mins = iprot.readI32();
         struct.setMemtable_flush_after_minsIsSet(true);
       }
-      if (incoming.get(32)) {
+      if (incoming.get(31)) {
         struct.memtable_throughput_in_mb = iprot.readI32();
         struct.setMemtable_throughput_in_mbIsSet(true);
       }
-      if (incoming.get(33)) {
+      if (incoming.get(32)) {
         struct.memtable_operations_in_millions = iprot.readDouble();
         struct.setMemtable_operations_in_millionsIsSet(true);
       }
-      if (incoming.get(34)) {
+      if (incoming.get(33)) {
         struct.replicate_on_write = iprot.readBool();
         struct.setReplicate_on_writeIsSet(true);
       }
-      if (incoming.get(35)) {
+      if (incoming.get(34)) {
         struct.merge_shards_chance = iprot.readDouble();
         struct.setMerge_shards_chanceIsSet(true);
       }
-      if (incoming.get(36)) {
+      if (incoming.get(35)) {
         struct.row_cache_provider = iprot.readString();
         struct.setRow_cache_providerIsSet(true);
       }
-      if (incoming.get(37)) {
+      if (incoming.get(36)) {
         struct.row_cache_keys_to_save = iprot.readI32();
         struct.setRow_cache_keys_to_saveIsSet(true);
       }
+      if (incoming.get(37)) {
+        struct.populate_io_cache_on_flush = iprot.readBool();
+        struct.setPopulate_io_cache_on_flushIsSet(true);
+      }
       if (incoming.get(38)) {
         struct.index_interval = iprot.readI32();
         struct.setIndex_intervalIsSet(true);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java
----------------------------------------------------------------------
diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java
new file mode 100644
index 0000000..67b88a3
--- /dev/null
+++ b/interface/thrift/gen-java/org/apache/cassandra/thrift/ColumnSlice.java
@@ -0,0 +1,551 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.1)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.cassandra.thrift;
+/*
+ * 
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * 
+ */
+
+
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The ColumnSlice is used to select a set of columns from inside a row.
+ * If start or finish are unspecified they will default to the start or end
+ * of the row, respectively.
+ * @param start. The start of the ColumnSlice inclusive
+ * @param finish. The end of the ColumnSlice inclusive
+ */
+public class ColumnSlice implements org.apache.thrift.TBase<ColumnSlice, ColumnSlice._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnSlice> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnSlice");
+
+  private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField FINISH_FIELD_DESC = new org.apache.thrift.protocol.TField("finish", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ColumnSliceStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ColumnSliceTupleSchemeFactory());
+  }
+
+  public ByteBuffer start; // optional
+  public ByteBuffer finish; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    START((short)1, "start"),
+    FINISH((short)2, "finish");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // START
+          return START;
+        case 2: // FINISH
+          return FINISH;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private _Fields optionals[] = {_Fields.START,_Fields.FINISH};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.START, new org.apache.thrift.meta_data.FieldMetaData("start", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    tmpMap.put(_Fields.FINISH, new org.apache.thrift.meta_data.FieldMetaData("finish", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnSlice.class, metaDataMap);
+  }
+
+  public ColumnSlice() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ColumnSlice(ColumnSlice other) {
+    if (other.isSetStart()) {
+      this.start = org.apache.thrift.TBaseHelper.copyBinary(other.start);
+;
+    }
+    if (other.isSetFinish()) {
+      this.finish = org.apache.thrift.TBaseHelper.copyBinary(other.finish);
+;
+    }
+  }
+
+  public ColumnSlice deepCopy() {
+    return new ColumnSlice(this);
+  }
+
+  @Override
+  public void clear() {
+    this.start = null;
+    this.finish = null;
+  }
+
+  public byte[] getStart() {
+    setStart(org.apache.thrift.TBaseHelper.rightSize(start));
+    return start == null ? null : start.array();
+  }
+
+  public ByteBuffer bufferForStart() {
+    return start;
+  }
+
+  public ColumnSlice setStart(byte[] start) {
+    setStart(start == null ? (ByteBuffer)null : ByteBuffer.wrap(start));
+    return this;
+  }
+
+  public ColumnSlice setStart(ByteBuffer start) {
+    this.start = start;
+    return this;
+  }
+
+  public void unsetStart() {
+    this.start = null;
+  }
+
+  /** Returns true if field start is set (has been assigned a value) and false otherwise */
+  public boolean isSetStart() {
+    return this.start != null;
+  }
+
+  public void setStartIsSet(boolean value) {
+    if (!value) {
+      this.start = null;
+    }
+  }
+
+  public byte[] getFinish() {
+    setFinish(org.apache.thrift.TBaseHelper.rightSize(finish));
+    return finish == null ? null : finish.array();
+  }
+
+  public ByteBuffer bufferForFinish() {
+    return finish;
+  }
+
+  public ColumnSlice setFinish(byte[] finish) {
+    setFinish(finish == null ? (ByteBuffer)null : ByteBuffer.wrap(finish));
+    return this;
+  }
+
+  public ColumnSlice setFinish(ByteBuffer finish) {
+    this.finish = finish;
+    return this;
+  }
+
+  public void unsetFinish() {
+    this.finish = null;
+  }
+
+  /** Returns true if field finish is set (has been assigned a value) and false otherwise */
+  public boolean isSetFinish() {
+    return this.finish != null;
+  }
+
+  public void setFinishIsSet(boolean value) {
+    if (!value) {
+      this.finish = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case START:
+      if (value == null) {
+        unsetStart();
+      } else {
+        setStart((ByteBuffer)value);
+      }
+      break;
+
+    case FINISH:
+      if (value == null) {
+        unsetFinish();
+      } else {
+        setFinish((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case START:
+      return getStart();
+
+    case FINISH:
+      return getFinish();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case START:
+      return isSetStart();
+    case FINISH:
+      return isSetFinish();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ColumnSlice)
+      return this.equals((ColumnSlice)that);
+    return false;
+  }
+
+  public boolean equals(ColumnSlice that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_start = true && this.isSetStart();
+    boolean that_present_start = true && that.isSetStart();
+    if (this_present_start || that_present_start) {
+      if (!(this_present_start && that_present_start))
+        return false;
+      if (!this.start.equals(that.start))
+        return false;
+    }
+
+    boolean this_present_finish = true && this.isSetFinish();
+    boolean that_present_finish = true && that.isSetFinish();
+    if (this_present_finish || that_present_finish) {
+      if (!(this_present_finish && that_present_finish))
+        return false;
+      if (!this.finish.equals(that.finish))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+
+    boolean present_start = true && (isSetStart());
+    builder.append(present_start);
+    if (present_start)
+      builder.append(start);
+
+    boolean present_finish = true && (isSetFinish());
+    builder.append(present_finish);
+    if (present_finish)
+      builder.append(finish);
+
+    return builder.toHashCode();
+  }
+
+  @Override
+  public int compareTo(ColumnSlice other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetStart()).compareTo(other.isSetStart());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetStart()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start, other.start);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFinish()).compareTo(other.isSetFinish());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFinish()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.finish, other.finish);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ColumnSlice(");
+    boolean first = true;
+
+    if (isSetStart()) {
+      sb.append("start:");
+      if (this.start == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.start, sb);
+      }
+      first = false;
+    }
+    if (isSetFinish()) {
+      if (!first) sb.append(", ");
+      sb.append("finish:");
+      if (this.finish == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.finish, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ColumnSliceStandardSchemeFactory implements SchemeFactory {
+    public ColumnSliceStandardScheme getScheme() {
+      return new ColumnSliceStandardScheme();
+    }
+  }
+
+  private static class ColumnSliceStandardScheme extends StandardScheme<ColumnSlice> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnSlice struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // START
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.start = iprot.readBinary();
+              struct.setStartIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // FINISH
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.finish = iprot.readBinary();
+              struct.setFinishIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+
+      // check for required fields of primitive type, which can't be checked in the validate method
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnSlice struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.start != null) {
+        if (struct.isSetStart()) {
+          oprot.writeFieldBegin(START_FIELD_DESC);
+          oprot.writeBinary(struct.start);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.finish != null) {
+        if (struct.isSetFinish()) {
+          oprot.writeFieldBegin(FINISH_FIELD_DESC);
+          oprot.writeBinary(struct.finish);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ColumnSliceTupleSchemeFactory implements SchemeFactory {
+    public ColumnSliceTupleScheme getScheme() {
+      return new ColumnSliceTupleScheme();
+    }
+  }
+
+  private static class ColumnSliceTupleScheme extends TupleScheme<ColumnSlice> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ColumnSlice struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetStart()) {
+        optionals.set(0);
+      }
+      if (struct.isSetFinish()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetStart()) {
+        oprot.writeBinary(struct.start);
+      }
+      if (struct.isSetFinish()) {
+        oprot.writeBinary(struct.finish);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ColumnSlice struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.start = iprot.readBinary();
+        struct.setStartIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.finish = iprot.readBinary();
+        struct.setFinishIsSet(true);
+      }
+    }
+  }
+
+}
+
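
For reference, here is a minimal client-side sketch of how the new ColumnSlice and MultiSliceRequest classes might be used together. It assumes the get_multi_slice RPC that accompanies this backport, an already-open Cassandra.Client connection, and the usual Thrift-generated fluent setters on MultiSliceRequest (setKey, setColumn_parent, setColumn_slices, and so on); the column family, row key and ranges are made up for illustration and nothing here is part of the diff itself.

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ColumnSlice;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.MultiSliceRequest;

public class MultiSliceExample
{
    // Reads two disjoint column ranges from one row in a single round trip.
    // 'client' is assumed to be an already-opened, keyspace-bound Thrift connection.
    public static List<ColumnOrSuperColumn> readTwoRanges(Cassandra.Client client) throws Exception
    {
        ColumnSlice first = new ColumnSlice()
            .setStart("a".getBytes())    // inclusive start of the first range
            .setFinish("c".getBytes());  // inclusive end of the first range

        // finish left unset, so this slice runs through the end of the row
        ColumnSlice second = new ColumnSlice()
            .setStart("x".getBytes());

        MultiSliceRequest request = new MultiSliceRequest()
            .setKey(ByteBuffer.wrap("row1".getBytes()))
            .setColumn_parent(new ColumnParent("Standard1"))
            .setColumn_slices(Arrays.asList(first, second))
            .setReversed(false)
            .setCount(1000)
            .setConsistency_level(ConsistencyLevel.ONE);

        return client.get_multi_slice(request);
    }
}

Each ColumnSlice selects one inclusive [start, finish] range of columns; passing several of them in column_slices lets the server return multiple disjoint ranges of the same row in one request instead of issuing one get_slice call per range.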

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7f63b1f9/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java
----------------------------------------------------------------------
diff --git a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java b/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java
index bc0cf77..7487ed7 100644
--- a/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java
+++ b/interface/thrift/gen-java/org/apache/cassandra/thrift/CqlRow.java
@@ -55,7 +55,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Row returned from a CQL query
+ * Row returned from a CQL query.
+ * 
+ * This struct is used for both CQL2 and CQL3 queries.  For CQL2, the partition key
+ * is special-cased and is always returned.  For CQL3, it is not special cased;
+ * it will be included in the columns list if it was included in the SELECT and
+ * the key field is always null.
  */
 public class CqlRow implements org.apache.thrift.TBase<CqlRow, CqlRow._Fields>, java.io.Serializable, Cloneable, Comparable<CqlRow> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CqlRow");