Posted to commits@cassandra.apache.org by ca...@codespot.com on 2013/10/17 22:02:56 UTC

[cassandra-dbapi2] push by alek...@yeschenko.com - Add LOCAL_ONE consistency level... on 2013-10-17 20:02 GMT

Revision: bb4a638602f9
Author:   Aleksey Yeschenko <al...@yeschenko.com>
Date:     Thu Oct 17 20:01:21 2013 UTC
Log:      Add LOCAL_ONE consistency level

Patch by Jason Brown for #53

http://code.google.com/a/apache-extras.org/p/cassandra-dbapi2/source/detail?r=bb4a638602f9

Modified:
  /cql/cassandra/Cassandra.py
  /cql/cassandra/constants.py
  /cql/cassandra/ttypes.py
  /cql/connection.py
  /cql/native.py

=======================================
--- /cql/cassandra/Cassandra.py	Thu Oct 18 01:32:51 2012 UTC
+++ /cql/cassandra/Cassandra.py	Thu Oct 17 20:01:21 2013 UTC
@@ -119,7 +119,7 @@
    def get_indexed_slices(self, column_parent, index_clause,  
column_predicate, consistency_level):
      """
      Returns the subset of columns specified in SlicePredicate for the rows  
matching the IndexClause
-    @Deprecated; use get_range_slices instead with range.row_filter  
specified
+    @deprecated use get_range_slices instead with range.row_filter  
specified

      Parameters:
       - column_parent
@@ -314,6 +314,16 @@
      """
      pass

+  def describe_splits_ex(self, cfName, start_token, end_token,  
keys_per_split):
+    """
+    Parameters:
+     - cfName
+     - start_token
+     - end_token
+     - keys_per_split
+    """
+    pass
+
    def system_add_column_family(self, cf_def):
      """
      adds a column family. returns the new schema id.
@@ -431,7 +441,7 @@

    def set_cql_version(self, version):
      """
-    @Deprecated This is now a no-op. Please use the CQL3 specific methods  
instead.
+    @deprecated This is now a no-op. Please use the CQL3 specific methods  
instead.

      Parameters:
       - version
@@ -822,7 +832,7 @@
    def get_indexed_slices(self, column_parent, index_clause,  
column_predicate, consistency_level):
      """
      Returns the subset of columns specified in SlicePredicate for the rows  
matching the IndexClause
-    @Deprecated; use get_range_slices instead with range.row_filter  
specified
+    @deprecated use get_range_slices instead with range.row_filter  
specified

      Parameters:
       - column_parent
@@ -1513,6 +1523,44 @@
        return result.success
      raise  
TApplicationException(TApplicationException.MISSING_RESULT, "trace_next_query  
failed: unknown result");

+  def describe_splits_ex(self, cfName, start_token, end_token,  
keys_per_split):
+    """
+    Parameters:
+     - cfName
+     - start_token
+     - end_token
+     - keys_per_split
+    """
+    self.send_describe_splits_ex(cfName, start_token, end_token,  
keys_per_split)
+    return self.recv_describe_splits_ex()
+
+  def send_describe_splits_ex(self, cfName, start_token, end_token,  
keys_per_split):
+    self._oprot.writeMessageBegin('describe_splits_ex', TMessageType.CALL,  
self._seqid)
+    args = describe_splits_ex_args()
+    args.cfName = cfName
+    args.start_token = start_token
+    args.end_token = end_token
+    args.keys_per_split = keys_per_split
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_describe_splits_ex(self, ):
+    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(self._iprot)
+      self._iprot.readMessageEnd()
+      raise x
+    result = describe_splits_ex_result()
+    result.read(self._iprot)
+    self._iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.ire is not None:
+      raise result.ire
+    raise  
TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits_ex  
failed: unknown result");
+
    def system_add_column_family(self, cf_def):
      """
      adds a column family. returns the new schema id.
@@ -1974,7 +2022,7 @@

    def set_cql_version(self, version):
      """
-    @Deprecated This is now a no-op. Please use the CQL3 specific methods  
instead.
+    @deprecated This is now a no-op. Please use the CQL3 specific methods  
instead.

      Parameters:
       - version
@@ -2037,6 +2085,7 @@
      self._processMap["describe_keyspace"] =  
Processor.process_describe_keyspace
      self._processMap["describe_splits"] = Processor.process_describe_splits
      self._processMap["trace_next_query"] =  
Processor.process_trace_next_query
+    self._processMap["describe_splits_ex"] =  
Processor.process_describe_splits_ex
      self._processMap["system_add_column_family"] =  
Processor.process_system_add_column_family
      self._processMap["system_drop_column_family"] =  
Processor.process_system_drop_column_family
      self._processMap["system_add_keyspace"] =  
Processor.process_system_add_keyspace
@@ -2509,6 +2558,20 @@
      oprot.writeMessageEnd()
      oprot.trans.flush()

+  def process_describe_splits_ex(self, seqid, iprot, oprot):
+    args = describe_splits_ex_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = describe_splits_ex_result()
+    try:
+      result.success = self._handler.describe_splits_ex(args.cfName,  
args.start_token, args.end_token, args.keys_per_split)
+    except InvalidRequestException, ire:
+      result.ire = ire
+    oprot.writeMessageBegin("describe_splits_ex", TMessageType.REPLY,  
seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
    def process_system_add_column_family(self, seqid, iprot, oprot):
      args = system_add_column_family_args()
      args.read(iprot)
@@ -7349,6 +7412,191 @@
    def __ne__(self, other):
      return not (self == other)

+class describe_splits_ex_args:
+  """
+  Attributes:
+   - cfName
+   - start_token
+   - end_token
+   - keys_per_split
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'cfName', None, None, ), # 1
+    (2, TType.STRING, 'start_token', None, None, ), # 2
+    (3, TType.STRING, 'end_token', None, None, ), # 3
+    (4, TType.I32, 'keys_per_split', None, None, ), # 4
+  )
+
+  def __init__(self, cfName=None, start_token=None, end_token=None,  
keys_per_split=None,):
+    self.cfName = cfName
+    self.start_token = start_token
+    self.end_token = end_token
+    self.keys_per_split = keys_per_split
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and  
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec  
is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__,  
self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.cfName = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.start_token = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.end_token = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I32:
+          self.keys_per_split = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and  
self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__,  
self.thrift_spec)))
+      return
+    oprot.writeStructBegin('describe_splits_ex_args')
+    if self.cfName is not None:
+      oprot.writeFieldBegin('cfName', TType.STRING, 1)
+      oprot.writeString(self.cfName)
+      oprot.writeFieldEnd()
+    if self.start_token is not None:
+      oprot.writeFieldBegin('start_token', TType.STRING, 2)
+      oprot.writeString(self.start_token)
+      oprot.writeFieldEnd()
+    if self.end_token is not None:
+      oprot.writeFieldBegin('end_token', TType.STRING, 3)
+      oprot.writeString(self.end_token)
+      oprot.writeFieldEnd()
+    if self.keys_per_split is not None:
+      oprot.writeFieldBegin('keys_per_split', TType.I32, 4)
+      oprot.writeI32(self.keys_per_split)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.cfName is None:
+      raise TProtocol.TProtocolException(message='Required field cfName is  
unset!')
+    if self.start_token is None:
+      raise TProtocol.TProtocolException(message='Required field  
start_token is unset!')
+    if self.end_token is None:
+      raise TProtocol.TProtocolException(message='Required field end_token  
is unset!')
+    if self.keys_per_split is None:
+      raise TProtocol.TProtocolException(message='Required field  
keys_per_split is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ ==  
other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class describe_splits_ex_result:
+  """
+  Attributes:
+   - success
+   - ire
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRUCT,(CfSplit,  
CfSplit.thrift_spec)), None, ), # 0
+    (1, TType.STRUCT, 'ire', (InvalidRequestException,  
InvalidRequestException.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, success=None, ire=None,):
+    self.success = success
+    self.ire = ire
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and  
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec  
is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__,  
self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype334, _size331) = iprot.readListBegin()
+          for _i335 in xrange(_size331):
+            _elem336 = CfSplit()
+            _elem336.read(iprot)
+            self.success.append(_elem336)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.ire = InvalidRequestException()
+          self.ire.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and  
self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__,  
self.thrift_spec)))
+      return
+    oprot.writeStructBegin('describe_splits_ex_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRUCT, len(self.success))
+      for iter337 in self.success:
+        iter337.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.ire is not None:
+      oprot.writeFieldBegin('ire', TType.STRUCT, 1)
+      self.ire.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ ==  
other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
  class system_add_column_family_args:
    """
    Attributes:
@@ -8957,10 +9205,10 @@
        elif fid == 2:
          if ftype == TType.LIST:
            self.values = []
-          (_etype334, _size331) = iprot.readListBegin()
-          for _i335 in xrange(_size331):
-            _elem336 = iprot.readString();
-            self.values.append(_elem336)
+          (_etype341, _size338) = iprot.readListBegin()
+          for _i342 in xrange(_size338):
+            _elem343 = iprot.readString();
+            self.values.append(_elem343)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -8981,8 +9229,8 @@
      if self.values is not None:
        oprot.writeFieldBegin('values', TType.LIST, 2)
        oprot.writeListBegin(TType.STRING, len(self.values))
-      for iter337 in self.values:
-        oprot.writeString(iter337)
+      for iter344 in self.values:
+        oprot.writeString(iter344)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@ -9156,10 +9404,10 @@
        elif fid == 2:
          if ftype == TType.LIST:
            self.values = []
-          (_etype341, _size338) = iprot.readListBegin()
-          for _i342 in xrange(_size338):
-            _elem343 = iprot.readString();
-            self.values.append(_elem343)
+          (_etype348, _size345) = iprot.readListBegin()
+          for _i349 in xrange(_size345):
+            _elem350 = iprot.readString();
+            self.values.append(_elem350)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@ -9185,8 +9433,8 @@
      if self.values is not None:
        oprot.writeFieldBegin('values', TType.LIST, 2)
        oprot.writeListBegin(TType.STRING, len(self.values))
-      for iter344 in self.values:
-        oprot.writeString(iter344)
+      for iter351 in self.values:
+        oprot.writeString(iter351)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.consistency is not None:
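
The new RPC is plain generated Thrift plumbing; for completeness, here is a rough sketch of driving it through the generated client. The endpoint, keyspace and column family names are placeholders, the token arguments are partitioner-dependent strings, and the transport/protocol setup is ordinary thrift-py boilerplate rather than anything introduced by this patch:

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from cql.cassandra import Cassandra

    # Placeholder endpoint; Cassandra's Thrift RPC port is 9160 by default.
    socket = TSocket.TSocket('127.0.0.1', 9160)
    transport = TTransport.TFramedTransport(socket)
    client = Cassandra.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    client.set_keyspace('my_keyspace')  # placeholder keyspace

    # describe_splits_ex(cfName, start_token, end_token, keys_per_split)
    # returns a list of CfSplit(start_token, end_token, row_count).
    splits = client.describe_splits_ex('my_cf', '0', '0', 1024)
    for split in splits:
        print('%s .. %s  ~%d rows' % (split.start_token, split.end_token, split.row_count))
    transport.close()
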
=======================================
--- /cql/cassandra/constants.py	Thu Oct 18 01:32:51 2012 UTC
+++ /cql/cassandra/constants.py	Thu Oct 17 20:01:21 2013 UTC
@@ -7,4 +7,4 @@
  from thrift.Thrift import *
  from ttypes import *

-VERSION = "19.35.0"
+VERSION = "19.36.1"
=======================================
--- /cql/cassandra/ttypes.py	Wed Oct  3 23:50:04 2012 UTC
+++ /cql/cassandra/ttypes.py	Thu Oct 17 20:01:21 2013 UTC
@@ -43,6 +43,7 @@
      TWO          Ensure that the write has been written to at least 2  
node's commit log and memory table
      THREE        Ensure that the write has been written to at least 3  
node's commit log and memory table
      QUORUM       Ensure that the write has been written to  
<ReplicationFactor> / 2 + 1 nodes
+    LOCAL_ONE    Ensure that the write has been written to 1 node within  
the local datacenter (requires NetworkTopologyStrategy)
      LOCAL_QUORUM Ensure that the write has been written to  
<ReplicationFactor> / 2 + 1 nodes, within the local datacenter (requires  
NetworkTopologyStrategy)
      EACH_QUORUM  Ensure that the write has been written to  
<ReplicationFactor> / 2 + 1 nodes in each datacenter (requires  
NetworkTopologyStrategy)
      ALL          Ensure that the write is written to  
<code>&lt;ReplicationFactor&gt;</code> nodes before responding to the  
client.
@@ -53,6 +54,7 @@
      TWO          Returns the record with the most recent timestamp once  
two replicas have replied.
      THREE        Returns the record with the most recent timestamp once  
three replicas have replied.
      QUORUM       Returns the record with the most recent timestamp once a  
majority of replicas have replied.
+    LOCAL_ONE    Returns the record with the most recent timestamp once a  
single replica within the local datacenter has replied.
      LOCAL_QUORUM Returns the record with the most recent timestamp once a  
majority of replicas within the local datacenter have replied.
      EACH_QUORUM  Returns the record with the most recent timestamp once a  
majority of replicas within each datacenter have replied.
      ALL          Returns the record with the most recent timestamp once  
all replicas have replied (implies no replica may be down)..
@@ -65,6 +67,7 @@
    ANY = 6
    TWO = 7
    THREE = 8
+  LOCAL_ONE = 11

    _VALUES_TO_NAMES = {
      1: "ONE",
@@ -75,6 +78,7 @@
      6: "ANY",
      7: "TWO",
      8: "THREE",
+    11: "LOCAL_ONE",
    }

    _NAMES_TO_VALUES = {
@@ -86,6 +90,7 @@
      "ANY": 6,
      "TWO": 7,
      "THREE": 8,
+    "LOCAL_ONE": 11,
    }

  class IndexOperator:
@@ -1542,7 +1547,7 @@

  class IndexClause:
    """
-  @Deprecated: use a KeyRange with row_filter in get_range_slices instead
+  @deprecated use a KeyRange with row_filter in get_range_slices instead

    Attributes:
     - expressions
@@ -2562,6 +2567,7 @@
     - bloom_filter_fp_chance
     - caching
     - dclocal_read_repair_chance
+   - populate_io_cache_on_flush
     - row_cache_size: @deprecated
     - key_cache_size: @deprecated
     - row_cache_save_period_in_seconds: @deprecated
@@ -2613,9 +2619,10 @@
      None, # 35
      None, # 36
      (37, TType.DOUBLE, 'dclocal_read_repair_chance', None, 0, ), # 37
+    (38, TType.BOOL, 'populate_io_cache_on_flush', None, None, ), # 38
    )

-  def __init__(self, keyspace=None, name=None,  
column_type=thrift_spec[3][4], comparator_type=thrift_spec[5][4],  
subcomparator_type=None, comment=None, read_repair_chance=None,  
column_metadata=None, gc_grace_seconds=None, default_validation_class=None,  
id=None, min_compaction_threshold=None, max_compaction_threshold=None,  
replicate_on_write=None, key_validation_class=None, key_alias=None,  
compaction_strategy=None, compaction_strategy_options=None,  
compression_options=None, bloom_filter_fp_chance=None,  
caching=thrift_spec[34][4], dclocal_read_repair_chance=thrift_spec[37][4],  
row_cache_size=None, key_cache_size=None,  
row_cache_save_period_in_seconds=None,  
key_cache_save_period_in_seconds=None, memtable_flush_after_mins=None,  
memtable_throughput_in_mb=None, memtable_operations_in_millions=None,  
merge_shards_chance=None, row_cache_provider=None,  
row_cache_keys_to_save=None,):
+  def __init__(self, keyspace=None, name=None,  
column_type=thrift_spec[3][4], comparator_type=thrift_spec[5][4],  
subcomparator_type=None, comment=None, read_repair_chance=None,  
column_metadata=None, gc_grace_seconds=None, default_validation_class=None,  
id=None, min_compaction_threshold=None, max_compaction_threshold=None,  
replicate_on_write=None, key_validation_class=None, key_alias=None,  
compaction_strategy=None, compaction_strategy_options=None,  
compression_options=None, bloom_filter_fp_chance=None,  
caching=thrift_spec[34][4], dclocal_read_repair_chance=thrift_spec[37][4],  
populate_io_cache_on_flush=None, row_cache_size=None, key_cache_size=None,  
row_cache_save_period_in_seconds=None,  
key_cache_save_period_in_seconds=None, memtable_flush_after_mins=None,  
memtable_throughput_in_mb=None, memtable_operations_in_millions=None,  
merge_shards_chance=None, row_cache_provider=None,  
row_cache_keys_to_save=None,):
      self.keyspace = keyspace
      self.name = name
      self.column_type = column_type
@@ -2638,6 +2645,7 @@
      self.bloom_filter_fp_chance = bloom_filter_fp_chance
      self.caching = caching
      self.dclocal_read_repair_chance = dclocal_read_repair_chance
+    self.populate_io_cache_on_flush = populate_io_cache_on_flush
      self.row_cache_size = row_cache_size
      self.key_cache_size = key_cache_size
      self.row_cache_save_period_in_seconds =  
row_cache_save_period_in_seconds
@@ -2785,6 +2793,11 @@
          if ftype == TType.DOUBLE:
            self.dclocal_read_repair_chance = iprot.readDouble();
          else:
+          iprot.skip(ftype)
+      elif fid == 38:
+        if ftype == TType.BOOL:
+          self.populate_io_cache_on_flush = iprot.readBool();
+        else:
            iprot.skip(ftype)
        elif fid == 9:
          if ftype == TType.DOUBLE:
@@ -2985,6 +2998,10 @@
        oprot.writeFieldBegin('dclocal_read_repair_chance', TType.DOUBLE, 37)
        oprot.writeDouble(self.dclocal_read_repair_chance)
        oprot.writeFieldEnd()
+    if self.populate_io_cache_on_flush is not None:
+      oprot.writeFieldBegin('populate_io_cache_on_flush', TType.BOOL, 38)
+      oprot.writeBool(self.populate_io_cache_on_flush)
+      oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()

@@ -3013,7 +3030,7 @@
     - name
     - strategy_class
     - strategy_options
-   - replication_factor: @deprecated, ignored
+   - replication_factor: @deprecated ignored
     - cf_defs
     - durable_writes
    """
@@ -3586,3 +3603,95 @@

    def __ne__(self, other):
      return not (self == other)
+
+class CfSplit:
+  """
+  Represents input splits used by hadoop ColumnFamilyRecordReaders
+
+  Attributes:
+   - start_token
+   - end_token
+   - row_count
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'start_token', None, None, ), # 1
+    (2, TType.STRING, 'end_token', None, None, ), # 2
+    (3, TType.I64, 'row_count', None, None, ), # 3
+  )
+
+  def __init__(self, start_token=None, end_token=None, row_count=None,):
+    self.start_token = start_token
+    self.end_token = end_token
+    self.row_count = row_count
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and  
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec  
is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__,  
self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.start_token = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.end_token = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I64:
+          self.row_count = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and  
self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__,  
self.thrift_spec)))
+      return
+    oprot.writeStructBegin('CfSplit')
+    if self.start_token is not None:
+      oprot.writeFieldBegin('start_token', TType.STRING, 1)
+      oprot.writeString(self.start_token)
+      oprot.writeFieldEnd()
+    if self.end_token is not None:
+      oprot.writeFieldBegin('end_token', TType.STRING, 2)
+      oprot.writeString(self.end_token)
+      oprot.writeFieldEnd()
+    if self.row_count is not None:
+      oprot.writeFieldBegin('row_count', TType.I64, 3)
+      oprot.writeI64(self.row_count)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.start_token is None:
+      raise TProtocol.TProtocolException(message='Required field  
start_token is unset!')
+    if self.end_token is None:
+      raise TProtocol.TProtocolException(message='Required field end_token  
is unset!')
+    if self.row_count is None:
+      raise TProtocol.TProtocolException(message='Required field row_count  
is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ ==  
other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
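
The ttypes.py changes are mechanical Thrift output, but the practical effect is that the ConsistencyLevel enum and its lookup tables now carry the new level. A minimal sanity check, using only names that appear in the hunks above:

    from cql.cassandra.ttypes import ConsistencyLevel

    assert ConsistencyLevel.LOCAL_ONE == 11
    assert ConsistencyLevel._VALUES_TO_NAMES[ConsistencyLevel.LOCAL_ONE] == 'LOCAL_ONE'
    assert ConsistencyLevel._NAMES_TO_VALUES['LOCAL_ONE'] == ConsistencyLevel.LOCAL_ONE
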
=======================================
--- /cql/connection.py	Sun May 26 00:41:52 2013 UTC
+++ /cql/connection.py	Thu Oct 17 20:01:21 2013 UTC
@@ -37,7 +37,7 @@
          *                       compression supported by both sides.
          * consistency_level ..: consistency level to use for CQL3 queries  
(optional);
          *                       "ONE" is the default CL, other supported  
values are:
-         *                       "ANY", "TWO", "THREE", "QUORUM", "LOCAL_QUORUM",
+         *                       "ANY", "TWO", "THREE", "QUORUM", "LOCAL_ONE", "LOCAL_QUORUM",
          *                       "EACH_QUORUM" and "ALL";
          *                       overridable on per-query basis.
          * transport...........: Thrift transport to use (optional);
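
Since the docstring above lists consistency_level as an optional connection argument, a LOCAL_ONE connection would look roughly like the sketch below. The host, port, keyspace, table name and cql_version value are placeholders; check the actual connect() signature in cql/connection.py:

    import cql

    conn = cql.connect('127.0.0.1', 9160, 'my_keyspace',
                       cql_version='3.0.0', consistency_level='LOCAL_ONE')
    cursor = conn.cursor()
    cursor.execute("SELECT id, value FROM my_table LIMIT 10")
    for row in cursor.fetchall():
        print(row)
    conn.close()
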
=======================================
--- /cql/native.py	Sun May 26 01:25:49 2013 UTC
+++ /cql/native.py	Thu Oct 17 20:01:21 2013 UTC
@@ -47,7 +47,8 @@
                  4: 'QUORUM',
                  5: 'ALL',
                  6: 'LOCAL_QUORUM',
-                7: 'EACH_QUORUM'}[value]
+                7: 'EACH_QUORUM',
+                10:'LOCAL_ONE'}[value]

      @classmethod
      def value_from_name(cls, name):
@@ -58,7 +59,8 @@
                  'QUORUM': 4,
                  'ALL': 5,
                  'LOCAL_QUORUM': 6,
-                'EACH_QUORUM': 7}[name]
+                'EACH_QUORUM': 7,
+                'LOCAL_ONE': 10}[name]

  class CqlResult:
      def __init__(self, column_metadata, rows):
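
One thing worth flagging: the native (binary) protocol maps LOCAL_ONE to 10, while the Thrift enum in ttypes.py uses 11 (presumably because values 9 and 10 are already taken in the Thrift IDL), so the two constants are not interchangeable. A standalone reminder, not code taken from the driver:

    THRIFT_LOCAL_ONE = 11   # cql/cassandra/ttypes.py: ConsistencyLevel.LOCAL_ONE
    NATIVE_LOCAL_ONE = 10   # cql/native.py: code used on the binary protocol

    assert THRIFT_LOCAL_ONE != NATIVE_LOCAL_ONE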