You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by ee...@apache.org on 2011/03/16 20:36:35 UTC
svn commit: r1082271 [2/5] - in /cassandra/trunk/drivers/py: cql/cassandra/
cql/cassandra/Cassandra.py cql/cassandra/__init__.py
cql/cassandra/constants.py cql/cassandra/ttypes.py cql/connection.py setup.py
Added: cassandra/trunk/drivers/py/cql/cassandra/Cassandra.py
URL: http://svn.apache.org/viewvc/cassandra/trunk/drivers/py/cql/cassandra/Cassandra.py?rev=1082271&view=auto
==============================================================================
--- cassandra/trunk/drivers/py/cql/cassandra/Cassandra.py (added)
+++ cassandra/trunk/drivers/py/cql/cassandra/Cassandra.py Wed Mar 16 19:36:35 2011
@@ -0,0 +1,8194 @@
+#
+# Autogenerated by Thrift
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+
+from thrift.Thrift import *
+from ttypes import *
+from thrift.Thrift import TProcessor
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
# The C-accelerated (de)serializer is optional; fall back to the pure-Python
# protocol implementation when the extension is not importable.
try:
    from thrift.protocol import fastbinary
except ImportError:  # was a bare `except:`; only import failure should be swallowed
    fastbinary = None
+
class Iface:
    """Abstract client-side interface for the Cassandra Thrift service.

    Every RPC exposed by the server has a stub here; `Client` (below) provides
    the wire implementation.  All stubs are no-ops (`pass`) — this class exists
    so alternative transports/mocks can share one interface.
    """

    def login(self, auth_request):
        """
        Parameters:
        - auth_request
        """
        pass

    def set_keyspace(self, keyspace):
        """
        Parameters:
        - keyspace
        """
        pass

    def get(self, key, column_path, consistency_level):
        """
        Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
        the only method that can throw an exception under non-failure conditions.)

        Parameters:
        - key
        - column_path
        - consistency_level
        """
        pass

    def get_slice(self, key, column_parent, predicate, consistency_level):
        """
        Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
        pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.

        Parameters:
        - key
        - column_parent
        - predicate
        - consistency_level
        """
        pass

    def get_count(self, key, column_parent, predicate, consistency_level):
        """
        returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
        <code>ColumnFamily</code> and optionally <code>SuperColumn</code>.

        Parameters:
        - key
        - column_parent
        - predicate
        - consistency_level
        """
        pass

    def multiget_slice(self, keys, column_parent, predicate, consistency_level):
        """
        Performs a get_slice for column_parent and predicate for the given keys in parallel.

        Parameters:
        - keys
        - column_parent
        - predicate
        - consistency_level
        """
        pass

    def multiget_count(self, keys, column_parent, predicate, consistency_level):
        """
        Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.

        Parameters:
        - keys
        - column_parent
        - predicate
        - consistency_level
        """
        pass

    def get_range_slices(self, column_parent, predicate, range, consistency_level):
        """
        returns a subset of columns for a contiguous range of keys.

        Parameters:
        - column_parent
        - predicate
        - range
        - consistency_level
        """
        pass

    def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
        """
        Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause

        Parameters:
        - column_parent
        - index_clause
        - column_predicate
        - consistency_level
        """
        pass

    def insert(self, key, column_parent, column, consistency_level):
        """
        Insert a Column at the given column_parent.column_family and optional column_parent.super_column.

        Parameters:
        - key
        - column_parent
        - column
        - consistency_level
        """
        pass

    def remove(self, key, column_path, timestamp, consistency_level):
        """
        Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
        that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
        row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.

        Note that counters have limited support for deletes: if you remove
        a counter, you must wait to issue any following update until the
        delete has reached all the nodes and all of them have been fully
        compacted.

        Parameters:
        - key
        - column_path
        - timestamp
        - consistency_level
        """
        pass

    def batch_mutate(self, mutation_map, consistency_level):
        """
        Mutate many columns or super columns for many row keys. See also: Mutation.

        mutation_map maps key to column family to a list of Mutation objects to take place at that scope.

        Parameters:
        - mutation_map
        - consistency_level
        """
        pass

    def truncate(self, cfname):
        """
        Truncate will mark an entire column family as deleted.
        From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
        Internally, however, disk space will not be immediately released, as with all deletes in cassandra, this one
        only marks the data as deleted.
        The operation succeeds only if all hosts in the cluster are available and will throw an UnavailableException if
        some hosts are down.

        Parameters:
        - cfname
        """
        pass

    def add(self, key, column_parent, column, consistency_level):
        """
        Increment or decrement a counter.

        Parameters:
        - key
        - column_parent
        - column
        - consistency_level
        """
        pass

    def batch_add(self, update_map, consistency_level):
        """
        Batch increment or decrement a counter.

        Parameters:
        - update_map
        - consistency_level
        """
        pass

    def get_counter(self, key, path, consistency_level):
        """
        Return the counter at the specified column path.

        Parameters:
        - key
        - path
        - consistency_level
        """
        pass

    def get_counter_slice(self, key, column_parent, predicate, consistency_level):
        """
        Get a list of counters from the specified columns.

        Parameters:
        - key
        - column_parent
        - predicate
        - consistency_level
        """
        pass

    def multiget_counter_slice(self, keys, column_parent, predicate, consistency_level):
        """
        Get counter slices from multiple keys.

        Parameters:
        - keys
        - column_parent
        - predicate
        - consistency_level
        """
        pass

    def remove_counter(self, key, path, consistency_level):
        """
        Remove a counter at the specified location.

        Parameters:
        - key
        - path
        - consistency_level
        """
        pass

    def describe_schema_versions(self, ):
        """
        for each schema version present in the cluster, returns a list of nodes at that version.
        hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
        the cluster is all on the same version if the size of the map is 1.
        """
        pass

    def describe_keyspaces(self, ):
        """
        list the defined keyspaces in this cluster
        """
        pass

    def describe_cluster_name(self, ):
        """
        get the cluster name
        """
        pass

    def describe_version(self, ):
        """
        get the thrift api version
        """
        pass

    def describe_ring(self, keyspace):
        """
        get the token ring: a map of ranges to host addresses,
        represented as a set of TokenRange instead of a map from range
        to list of endpoints, because you can't use Thrift structs as
        map keys:
        https://issues.apache.org/jira/browse/THRIFT-162

        for the same reason, we can't return a set here, even though
        order is neither important nor predictable.

        Parameters:
        - keyspace
        """
        pass

    def describe_partitioner(self, ):
        """
        returns the partitioner used by this cluster
        """
        pass

    def describe_snitch(self, ):
        """
        returns the snitch used by this cluster
        """
        pass

    def describe_keyspace(self, keyspace):
        """
        describe specified keyspace

        Parameters:
        - keyspace
        """
        pass

    def describe_splits(self, cfName, start_token, end_token, keys_per_split):
        """
        experimental API for hadoop/parallel query support.
        may change violently and without warning.

        returns list of token strings such that first subrange is (list[0], list[1]],
        next is (list[1], list[2]], etc.

        Parameters:
        - cfName
        - start_token
        - end_token
        - keys_per_split
        """
        pass

    def system_add_column_family(self, cf_def):
        """
        adds a column family. returns the new schema id.

        Parameters:
        - cf_def
        """
        pass

    def system_drop_column_family(self, column_family):
        """
        drops a column family. returns the new schema id.

        Parameters:
        - column_family
        """
        pass

    def system_add_keyspace(self, ks_def):
        """
        adds a keyspace and any column families that are part of it. returns the new schema id.

        Parameters:
        - ks_def
        """
        pass

    def system_drop_keyspace(self, keyspace):
        """
        drops a keyspace and any column families that are part of it. returns the new schema id.

        Parameters:
        - keyspace
        """
        pass

    def system_update_keyspace(self, ks_def):
        """
        updates properties of a keyspace. returns the new schema id.

        Parameters:
        - ks_def
        """
        pass

    def system_update_column_family(self, cf_def):
        """
        updates properties of a column family. returns the new schema id.

        Parameters:
        - cf_def
        """
        pass

    def execute_cql_query(self, query, compression):
        """
        Executes a CQL (Cassandra Query Language) statement and returns a
        CqlResult containing the results.

        Parameters:
        - query
        - compression
        """
        pass
+
+
+class Client(Iface):
+ def __init__(self, iprot, oprot=None):
+ self._iprot = self._oprot = iprot
+ if oprot != None:
+ self._oprot = oprot
+ self._seqid = 0
+
+ def login(self, auth_request):
+ """
+ Parameters:
+ - auth_request
+ """
+ self.send_login(auth_request)
+ self.recv_login()
+
+ def send_login(self, auth_request):
+ self._oprot.writeMessageBegin('login', TMessageType.CALL, self._seqid)
+ args = login_args()
+ args.auth_request = auth_request
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_login(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = login_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.authnx != None:
+ raise result.authnx
+ if result.authzx != None:
+ raise result.authzx
+ return
+
+ def set_keyspace(self, keyspace):
+ """
+ Parameters:
+ - keyspace
+ """
+ self.send_set_keyspace(keyspace)
+ self.recv_set_keyspace()
+
+ def send_set_keyspace(self, keyspace):
+ self._oprot.writeMessageBegin('set_keyspace', TMessageType.CALL, self._seqid)
+ args = set_keyspace_args()
+ args.keyspace = keyspace
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_set_keyspace(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = set_keyspace_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ return
+
+ def get(self, key, column_path, consistency_level):
+ """
+ Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
+ the only method that can throw an exception under non-failure conditions.)
+
+ Parameters:
+ - key
+ - column_path
+ - consistency_level
+ """
+ self.send_get(key, column_path, consistency_level)
+ return self.recv_get()
+
+ def send_get(self, key, column_path, consistency_level):
+ self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
+ args = get_args()
+ args.key = key
+ args.column_path = column_path
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.nfe != None:
+ raise result.nfe
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result");
+
+ def get_slice(self, key, column_parent, predicate, consistency_level):
+ """
+ Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
+ pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
+
+ Parameters:
+ - key
+ - column_parent
+ - predicate
+ - consistency_level
+ """
+ self.send_get_slice(key, column_parent, predicate, consistency_level)
+ return self.recv_get_slice()
+
+ def send_get_slice(self, key, column_parent, predicate, consistency_level):
+ self._oprot.writeMessageBegin('get_slice', TMessageType.CALL, self._seqid)
+ args = get_slice_args()
+ args.key = key
+ args.column_parent = column_parent
+ args.predicate = predicate
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_slice(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_slice_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_slice failed: unknown result");
+
+ def get_count(self, key, column_parent, predicate, consistency_level):
+ """
+ returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
+ <code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
+
+ Parameters:
+ - key
+ - column_parent
+ - predicate
+ - consistency_level
+ """
+ self.send_get_count(key, column_parent, predicate, consistency_level)
+ return self.recv_get_count()
+
+ def send_get_count(self, key, column_parent, predicate, consistency_level):
+ self._oprot.writeMessageBegin('get_count', TMessageType.CALL, self._seqid)
+ args = get_count_args()
+ args.key = key
+ args.column_parent = column_parent
+ args.predicate = predicate
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_count(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_count_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_count failed: unknown result");
+
+ def multiget_slice(self, keys, column_parent, predicate, consistency_level):
+ """
+ Performs a get_slice for column_parent and predicate for the given keys in parallel.
+
+ Parameters:
+ - keys
+ - column_parent
+ - predicate
+ - consistency_level
+ """
+ self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
+ return self.recv_multiget_slice()
+
+ def send_multiget_slice(self, keys, column_parent, predicate, consistency_level):
+ self._oprot.writeMessageBegin('multiget_slice', TMessageType.CALL, self._seqid)
+ args = multiget_slice_args()
+ args.keys = keys
+ args.column_parent = column_parent
+ args.predicate = predicate
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_multiget_slice(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = multiget_slice_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_slice failed: unknown result");
+
+ def multiget_count(self, keys, column_parent, predicate, consistency_level):
+ """
+ Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
+
+ Parameters:
+ - keys
+ - column_parent
+ - predicate
+ - consistency_level
+ """
+ self.send_multiget_count(keys, column_parent, predicate, consistency_level)
+ return self.recv_multiget_count()
+
+ def send_multiget_count(self, keys, column_parent, predicate, consistency_level):
+ self._oprot.writeMessageBegin('multiget_count', TMessageType.CALL, self._seqid)
+ args = multiget_count_args()
+ args.keys = keys
+ args.column_parent = column_parent
+ args.predicate = predicate
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_multiget_count(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = multiget_count_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_count failed: unknown result");
+
+ def get_range_slices(self, column_parent, predicate, range, consistency_level):
+ """
+ returns a subset of columns for a contiguous range of keys.
+
+ Parameters:
+ - column_parent
+ - predicate
+ - range
+ - consistency_level
+ """
+ self.send_get_range_slices(column_parent, predicate, range, consistency_level)
+ return self.recv_get_range_slices()
+
+ def send_get_range_slices(self, column_parent, predicate, range, consistency_level):
+ self._oprot.writeMessageBegin('get_range_slices', TMessageType.CALL, self._seqid)
+ args = get_range_slices_args()
+ args.column_parent = column_parent
+ args.predicate = predicate
+ args.range = range
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_range_slices(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_range_slices_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_range_slices failed: unknown result");
+
+ def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
+ """
+ Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
+
+ Parameters:
+ - column_parent
+ - index_clause
+ - column_predicate
+ - consistency_level
+ """
+ self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
+ return self.recv_get_indexed_slices()
+
+ def send_get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
+ self._oprot.writeMessageBegin('get_indexed_slices', TMessageType.CALL, self._seqid)
+ args = get_indexed_slices_args()
+ args.column_parent = column_parent
+ args.index_clause = index_clause
+ args.column_predicate = column_predicate
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_indexed_slices(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_indexed_slices_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_indexed_slices failed: unknown result");
+
+ def insert(self, key, column_parent, column, consistency_level):
+ """
+ Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
+
+ Parameters:
+ - key
+ - column_parent
+ - column
+ - consistency_level
+ """
+ self.send_insert(key, column_parent, column, consistency_level)
+ self.recv_insert()
+
+ def send_insert(self, key, column_parent, column, consistency_level):
+ self._oprot.writeMessageBegin('insert', TMessageType.CALL, self._seqid)
+ args = insert_args()
+ args.key = key
+ args.column_parent = column_parent
+ args.column = column
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_insert(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = insert_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ return
+
+ def remove(self, key, column_path, timestamp, consistency_level):
+ """
+ Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
+ that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
+ row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
+
+ Note that counters have limited support for deletes: if you remove
+ a counter, you must wait to issue any following update until the
+ delete has reached all the nodes and all of them have been fully
+ compacted.
+
+ Parameters:
+ - key
+ - column_path
+ - timestamp
+ - consistency_level
+ """
+ self.send_remove(key, column_path, timestamp, consistency_level)
+ self.recv_remove()
+
+ def send_remove(self, key, column_path, timestamp, consistency_level):
+ self._oprot.writeMessageBegin('remove', TMessageType.CALL, self._seqid)
+ args = remove_args()
+ args.key = key
+ args.column_path = column_path
+ args.timestamp = timestamp
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_remove(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = remove_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ return
+
+ def batch_mutate(self, mutation_map, consistency_level):
+ """
+ Mutate many columns or super columns for many row keys. See also: Mutation.
+
+ mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
+ *
+
+ Parameters:
+ - mutation_map
+ - consistency_level
+ """
+ self.send_batch_mutate(mutation_map, consistency_level)
+ self.recv_batch_mutate()
+
+ def send_batch_mutate(self, mutation_map, consistency_level):
+ self._oprot.writeMessageBegin('batch_mutate', TMessageType.CALL, self._seqid)
+ args = batch_mutate_args()
+ args.mutation_map = mutation_map
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_batch_mutate(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = batch_mutate_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ return
+
+ def truncate(self, cfname):
+ """
+ Truncate will mark and entire column family as deleted.
+ From the user's perspective a successful call to truncate will result complete data deletion from cfname.
+ Internally, however, disk space will not be immediatily released, as with all deletes in cassandra, this one
+ only marks the data as deleted.
+ The operation succeeds only if all hosts in the cluster at available and will throw an UnavailableException if
+ some hosts are down.
+
+ Parameters:
+ - cfname
+ """
+ self.send_truncate(cfname)
+ self.recv_truncate()
+
+ def send_truncate(self, cfname):
+ self._oprot.writeMessageBegin('truncate', TMessageType.CALL, self._seqid)
+ args = truncate_args()
+ args.cfname = cfname
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_truncate(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = truncate_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ return
+
+ def add(self, key, column_parent, column, consistency_level):
+ """
+ Increment or decrement a counter.
+
+ Parameters:
+ - key
+ - column_parent
+ - column
+ - consistency_level
+ """
+ self.send_add(key, column_parent, column, consistency_level)
+ self.recv_add()
+
+ def send_add(self, key, column_parent, column, consistency_level):
+ self._oprot.writeMessageBegin('add', TMessageType.CALL, self._seqid)
+ args = add_args()
+ args.key = key
+ args.column_parent = column_parent
+ args.column = column
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_add(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = add_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ return
+
+ def batch_add(self, update_map, consistency_level):
+ """
+ Batch increment or decrement a counter.
+
+ Parameters:
+ - update_map
+ - consistency_level
+ """
+ self.send_batch_add(update_map, consistency_level)
+ self.recv_batch_add()
+
+ def send_batch_add(self, update_map, consistency_level):
+ self._oprot.writeMessageBegin('batch_add', TMessageType.CALL, self._seqid)
+ args = batch_add_args()
+ args.update_map = update_map
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_batch_add(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = batch_add_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ return
+
+ def get_counter(self, key, path, consistency_level):
+ """
+ Return the counter at the specified column path.
+
+ Parameters:
+ - key
+ - path
+ - consistency_level
+ """
+ self.send_get_counter(key, path, consistency_level)
+ return self.recv_get_counter()
+
+ def send_get_counter(self, key, path, consistency_level):
+ self._oprot.writeMessageBegin('get_counter', TMessageType.CALL, self._seqid)
+ args = get_counter_args()
+ args.key = key
+ args.path = path
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_counter(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_counter_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.nfe != None:
+ raise result.nfe
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_counter failed: unknown result");
+
+ def get_counter_slice(self, key, column_parent, predicate, consistency_level):
+ """
+ Get a list of counters from the specified columns.
+
+ Parameters:
+ - key
+ - column_parent
+ - predicate
+ - consistency_level
+ """
+ self.send_get_counter_slice(key, column_parent, predicate, consistency_level)
+ return self.recv_get_counter_slice()
+
+ def send_get_counter_slice(self, key, column_parent, predicate, consistency_level):
+ self._oprot.writeMessageBegin('get_counter_slice', TMessageType.CALL, self._seqid)
+ args = get_counter_slice_args()
+ args.key = key
+ args.column_parent = column_parent
+ args.predicate = predicate
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_get_counter_slice(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = get_counter_slice_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_counter_slice failed: unknown result");
+
+ def multiget_counter_slice(self, keys, column_parent, predicate, consistency_level):
+ """
+ Get counter slices from multiple keys.
+
+ Parameters:
+ - keys
+ - column_parent
+ - predicate
+ - consistency_level
+ """
+ self.send_multiget_counter_slice(keys, column_parent, predicate, consistency_level)
+ return self.recv_multiget_counter_slice()
+
+ def send_multiget_counter_slice(self, keys, column_parent, predicate, consistency_level):
+ self._oprot.writeMessageBegin('multiget_counter_slice', TMessageType.CALL, self._seqid)
+ args = multiget_counter_slice_args()
+ args.keys = keys
+ args.column_parent = column_parent
+ args.predicate = predicate
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_multiget_counter_slice(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = multiget_counter_slice_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_counter_slice failed: unknown result");
+
+ def remove_counter(self, key, path, consistency_level):
+ """
+ Remove a counter at the specified location.
+
+ Parameters:
+ - key
+ - path
+ - consistency_level
+ """
+ self.send_remove_counter(key, path, consistency_level)
+ self.recv_remove_counter()
+
+ def send_remove_counter(self, key, path, consistency_level):
+ self._oprot.writeMessageBegin('remove_counter', TMessageType.CALL, self._seqid)
+ args = remove_counter_args()
+ args.key = key
+ args.path = path
+ args.consistency_level = consistency_level
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_remove_counter(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = remove_counter_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ return
+
+ def describe_schema_versions(self, ):
+ """
+ for each schema version present in the cluster, returns a list of nodes at that version.
+ hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
+ the cluster is all on the same version if the size of the map is 1.
+ """
+ self.send_describe_schema_versions()
+ return self.recv_describe_schema_versions()
+
+ def send_describe_schema_versions(self, ):
+ self._oprot.writeMessageBegin('describe_schema_versions', TMessageType.CALL, self._seqid)
+ args = describe_schema_versions_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_schema_versions(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_schema_versions_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_schema_versions failed: unknown result");
+
+ def describe_keyspaces(self, ):
+ """
+ list the defined keyspaces in this cluster
+ """
+ self.send_describe_keyspaces()
+ return self.recv_describe_keyspaces()
+
+ def send_describe_keyspaces(self, ):
+ self._oprot.writeMessageBegin('describe_keyspaces', TMessageType.CALL, self._seqid)
+ args = describe_keyspaces_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_keyspaces(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_keyspaces_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspaces failed: unknown result");
+
+ def describe_cluster_name(self, ):
+ """
+ get the cluster name
+ """
+ self.send_describe_cluster_name()
+ return self.recv_describe_cluster_name()
+
+ def send_describe_cluster_name(self, ):
+ self._oprot.writeMessageBegin('describe_cluster_name', TMessageType.CALL, self._seqid)
+ args = describe_cluster_name_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_cluster_name(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_cluster_name_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_cluster_name failed: unknown result");
+
+ def describe_version(self, ):
+ """
+ get the thrift api version
+ """
+ self.send_describe_version()
+ return self.recv_describe_version()
+
+ def send_describe_version(self, ):
+ self._oprot.writeMessageBegin('describe_version', TMessageType.CALL, self._seqid)
+ args = describe_version_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_version(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_version_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_version failed: unknown result");
+
+ def describe_ring(self, keyspace):
+ """
+ get the token ring: a map of ranges to host addresses,
+ represented as a set of TokenRange instead of a map from range
+ to list of endpoints, because you can't use Thrift structs as
+ map keys:
+ https://issues.apache.org/jira/browse/THRIFT-162
+
+ for the same reason, we can't return a set here, even though
+ order is neither important nor predictable.
+
+ Parameters:
+ - keyspace
+ """
+ self.send_describe_ring(keyspace)
+ return self.recv_describe_ring()
+
+ def send_describe_ring(self, keyspace):
+ self._oprot.writeMessageBegin('describe_ring', TMessageType.CALL, self._seqid)
+ args = describe_ring_args()
+ args.keyspace = keyspace
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_ring(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_ring_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_ring failed: unknown result");
+
+ def describe_partitioner(self, ):
+ """
+ returns the partitioner used by this cluster
+ """
+ self.send_describe_partitioner()
+ return self.recv_describe_partitioner()
+
+ def send_describe_partitioner(self, ):
+ self._oprot.writeMessageBegin('describe_partitioner', TMessageType.CALL, self._seqid)
+ args = describe_partitioner_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_partitioner(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_partitioner_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_partitioner failed: unknown result");
+
+ def describe_snitch(self, ):
+ """
+ returns the snitch used by this cluster
+ """
+ self.send_describe_snitch()
+ return self.recv_describe_snitch()
+
+ def send_describe_snitch(self, ):
+ self._oprot.writeMessageBegin('describe_snitch', TMessageType.CALL, self._seqid)
+ args = describe_snitch_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_snitch(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_snitch_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_snitch failed: unknown result");
+
+ def describe_keyspace(self, keyspace):
+ """
+ describe specified keyspace
+
+ Parameters:
+ - keyspace
+ """
+ self.send_describe_keyspace(keyspace)
+ return self.recv_describe_keyspace()
+
+ def send_describe_keyspace(self, keyspace):
+ self._oprot.writeMessageBegin('describe_keyspace', TMessageType.CALL, self._seqid)
+ args = describe_keyspace_args()
+ args.keyspace = keyspace
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_keyspace(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_keyspace_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.nfe != None:
+ raise result.nfe
+ if result.ire != None:
+ raise result.ire
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspace failed: unknown result");
+
+ def describe_splits(self, cfName, start_token, end_token, keys_per_split):
+ """
+ experimental API for hadoop/parallel query support.
+ may change violently and without warning.
+
+ returns list of token strings such that first subrange is (list[0], list[1]],
+ next is (list[1], list[2]], etc.
+
+ Parameters:
+ - cfName
+ - start_token
+ - end_token
+ - keys_per_split
+ """
+ self.send_describe_splits(cfName, start_token, end_token, keys_per_split)
+ return self.recv_describe_splits()
+
+ def send_describe_splits(self, cfName, start_token, end_token, keys_per_split):
+ self._oprot.writeMessageBegin('describe_splits', TMessageType.CALL, self._seqid)
+ args = describe_splits_args()
+ args.cfName = cfName
+ args.start_token = start_token
+ args.end_token = end_token
+ args.keys_per_split = keys_per_split
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_describe_splits(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = describe_splits_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits failed: unknown result");
+
+ def system_add_column_family(self, cf_def):
+ """
+ adds a column family. returns the new schema id.
+
+ Parameters:
+ - cf_def
+ """
+ self.send_system_add_column_family(cf_def)
+ return self.recv_system_add_column_family()
+
+ def send_system_add_column_family(self, cf_def):
+ self._oprot.writeMessageBegin('system_add_column_family', TMessageType.CALL, self._seqid)
+ args = system_add_column_family_args()
+ args.cf_def = cf_def
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_system_add_column_family(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = system_add_column_family_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.sde != None:
+ raise result.sde
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_column_family failed: unknown result");
+
+ def system_drop_column_family(self, column_family):
+ """
+ drops a column family. returns the new schema id.
+
+ Parameters:
+ - column_family
+ """
+ self.send_system_drop_column_family(column_family)
+ return self.recv_system_drop_column_family()
+
+ def send_system_drop_column_family(self, column_family):
+ self._oprot.writeMessageBegin('system_drop_column_family', TMessageType.CALL, self._seqid)
+ args = system_drop_column_family_args()
+ args.column_family = column_family
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_system_drop_column_family(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = system_drop_column_family_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.sde != None:
+ raise result.sde
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_column_family failed: unknown result");
+
+ def system_add_keyspace(self, ks_def):
+ """
+ adds a keyspace and any column families that are part of it. returns the new schema id.
+
+ Parameters:
+ - ks_def
+ """
+ self.send_system_add_keyspace(ks_def)
+ return self.recv_system_add_keyspace()
+
+ def send_system_add_keyspace(self, ks_def):
+ self._oprot.writeMessageBegin('system_add_keyspace', TMessageType.CALL, self._seqid)
+ args = system_add_keyspace_args()
+ args.ks_def = ks_def
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_system_add_keyspace(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = system_add_keyspace_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.sde != None:
+ raise result.sde
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_keyspace failed: unknown result");
+
+ def system_drop_keyspace(self, keyspace):
+ """
+ drops a keyspace and any column families that are part of it. returns the new schema id.
+
+ Parameters:
+ - keyspace
+ """
+ self.send_system_drop_keyspace(keyspace)
+ return self.recv_system_drop_keyspace()
+
+ def send_system_drop_keyspace(self, keyspace):
+ self._oprot.writeMessageBegin('system_drop_keyspace', TMessageType.CALL, self._seqid)
+ args = system_drop_keyspace_args()
+ args.keyspace = keyspace
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_system_drop_keyspace(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = system_drop_keyspace_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.sde != None:
+ raise result.sde
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_keyspace failed: unknown result");
+
+ def system_update_keyspace(self, ks_def):
+ """
+ updates properties of a keyspace. returns the new schema id.
+
+ Parameters:
+ - ks_def
+ """
+ self.send_system_update_keyspace(ks_def)
+ return self.recv_system_update_keyspace()
+
+ def send_system_update_keyspace(self, ks_def):
+ self._oprot.writeMessageBegin('system_update_keyspace', TMessageType.CALL, self._seqid)
+ args = system_update_keyspace_args()
+ args.ks_def = ks_def
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_system_update_keyspace(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = system_update_keyspace_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.sde != None:
+ raise result.sde
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_keyspace failed: unknown result");
+
+ def system_update_column_family(self, cf_def):
+ """
+ updates properties of a column family. returns the new schema id.
+
+ Parameters:
+ - cf_def
+ """
+ self.send_system_update_column_family(cf_def)
+ return self.recv_system_update_column_family()
+
+ def send_system_update_column_family(self, cf_def):
+ self._oprot.writeMessageBegin('system_update_column_family', TMessageType.CALL, self._seqid)
+ args = system_update_column_family_args()
+ args.cf_def = cf_def
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_system_update_column_family(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = system_update_column_family_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.sde != None:
+ raise result.sde
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_column_family failed: unknown result");
+
+ def execute_cql_query(self, query, compression):
+ """
+ Executes a CQL (Cassandra Query Language) statement and returns a
+ CqlResult containing the results.
+
+ Parameters:
+ - query
+ - compression
+ """
+ self.send_execute_cql_query(query, compression)
+ return self.recv_execute_cql_query()
+
+ def send_execute_cql_query(self, query, compression):
+ self._oprot.writeMessageBegin('execute_cql_query', TMessageType.CALL, self._seqid)
+ args = execute_cql_query_args()
+ args.query = query
+ args.compression = compression
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_execute_cql_query(self, ):
+ (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(self._iprot)
+ self._iprot.readMessageEnd()
+ raise x
+ result = execute_cql_query_result()
+ result.read(self._iprot)
+ self._iprot.readMessageEnd()
+ if result.success != None:
+ return result.success
+ if result.ire != None:
+ raise result.ire
+ if result.ue != None:
+ raise result.ue
+ if result.te != None:
+ raise result.te
+ if result.sde != None:
+ raise result.sde
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql_query failed: unknown result");
+
+
+class Processor(Iface, TProcessor):
+ def __init__(self, handler):
+ self._handler = handler
+ self._processMap = {}
+ self._processMap["login"] = Processor.process_login
+ self._processMap["set_keyspace"] = Processor.process_set_keyspace
+ self._processMap["get"] = Processor.process_get
+ self._processMap["get_slice"] = Processor.process_get_slice
+ self._processMap["get_count"] = Processor.process_get_count
+ self._processMap["multiget_slice"] = Processor.process_multiget_slice
+ self._processMap["multiget_count"] = Processor.process_multiget_count
+ self._processMap["get_range_slices"] = Processor.process_get_range_slices
+ self._processMap["get_indexed_slices"] = Processor.process_get_indexed_slices
+ self._processMap["insert"] = Processor.process_insert
+ self._processMap["remove"] = Processor.process_remove
+ self._processMap["batch_mutate"] = Processor.process_batch_mutate
+ self._processMap["truncate"] = Processor.process_truncate
+ self._processMap["add"] = Processor.process_add
+ self._processMap["batch_add"] = Processor.process_batch_add
+ self._processMap["get_counter"] = Processor.process_get_counter
+ self._processMap["get_counter_slice"] = Processor.process_get_counter_slice
+ self._processMap["multiget_counter_slice"] = Processor.process_multiget_counter_slice
+ self._processMap["remove_counter"] = Processor.process_remove_counter
+ self._processMap["describe_schema_versions"] = Processor.process_describe_schema_versions
+ self._processMap["describe_keyspaces"] = Processor.process_describe_keyspaces
+ self._processMap["describe_cluster_name"] = Processor.process_describe_cluster_name
+ self._processMap["describe_version"] = Processor.process_describe_version
+ self._processMap["describe_ring"] = Processor.process_describe_ring
+ self._processMap["describe_partitioner"] = Processor.process_describe_partitioner
+ self._processMap["describe_snitch"] = Processor.process_describe_snitch
+ self._processMap["describe_keyspace"] = Processor.process_describe_keyspace
+ self._processMap["describe_splits"] = Processor.process_describe_splits
+ self._processMap["system_add_column_family"] = Processor.process_system_add_column_family
+ self._processMap["system_drop_column_family"] = Processor.process_system_drop_column_family
+ self._processMap["system_add_keyspace"] = Processor.process_system_add_keyspace
+ self._processMap["system_drop_keyspace"] = Processor.process_system_drop_keyspace
+ self._processMap["system_update_keyspace"] = Processor.process_system_update_keyspace
+ self._processMap["system_update_column_family"] = Processor.process_system_update_column_family
+ self._processMap["execute_cql_query"] = Processor.process_execute_cql_query
+
+ def process(self, iprot, oprot):
+ (name, type, seqid) = iprot.readMessageBegin()
+ if name not in self._processMap:
+ iprot.skip(TType.STRUCT)
+ iprot.readMessageEnd()
+ x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
+ oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
+ x.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+ return
+ else:
+ self._processMap[name](self, seqid, iprot, oprot)
+ return True
+
+ def process_login(self, seqid, iprot, oprot):
+ args = login_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = login_result()
+ try:
+ self._handler.login(args.auth_request)
+ except AuthenticationException, authnx:
+ result.authnx = authnx
+ except AuthorizationException, authzx:
+ result.authzx = authzx
+ oprot.writeMessageBegin("login", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_set_keyspace(self, seqid, iprot, oprot):
+ args = set_keyspace_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = set_keyspace_result()
+ try:
+ self._handler.set_keyspace(args.keyspace)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ oprot.writeMessageBegin("set_keyspace", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_get(self, seqid, iprot, oprot):
+ args = get_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_result()
+ try:
+ result.success = self._handler.get(args.key, args.column_path, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except NotFoundException, nfe:
+ result.nfe = nfe
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("get", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_get_slice(self, seqid, iprot, oprot):
+ args = get_slice_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_slice_result()
+ try:
+ result.success = self._handler.get_slice(args.key, args.column_parent, args.predicate, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("get_slice", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_get_count(self, seqid, iprot, oprot):
+ args = get_count_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_count_result()
+ try:
+ result.success = self._handler.get_count(args.key, args.column_parent, args.predicate, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("get_count", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_multiget_slice(self, seqid, iprot, oprot):
+ args = multiget_slice_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = multiget_slice_result()
+ try:
+ result.success = self._handler.multiget_slice(args.keys, args.column_parent, args.predicate, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("multiget_slice", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_multiget_count(self, seqid, iprot, oprot):
+ args = multiget_count_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = multiget_count_result()
+ try:
+ result.success = self._handler.multiget_count(args.keys, args.column_parent, args.predicate, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("multiget_count", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_get_range_slices(self, seqid, iprot, oprot):
+ args = get_range_slices_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_range_slices_result()
+ try:
+ result.success = self._handler.get_range_slices(args.column_parent, args.predicate, args.range, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("get_range_slices", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_get_indexed_slices(self, seqid, iprot, oprot):
+ args = get_indexed_slices_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_indexed_slices_result()
+ try:
+ result.success = self._handler.get_indexed_slices(args.column_parent, args.index_clause, args.column_predicate, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("get_indexed_slices", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_insert(self, seqid, iprot, oprot):
+ args = insert_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = insert_result()
+ try:
+ self._handler.insert(args.key, args.column_parent, args.column, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("insert", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_remove(self, seqid, iprot, oprot):
+ args = remove_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = remove_result()
+ try:
+ self._handler.remove(args.key, args.column_path, args.timestamp, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("remove", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_batch_mutate(self, seqid, iprot, oprot):
+ args = batch_mutate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = batch_mutate_result()
+ try:
+ self._handler.batch_mutate(args.mutation_map, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("batch_mutate", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_truncate(self, seqid, iprot, oprot):
+ args = truncate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = truncate_result()
+ try:
+ self._handler.truncate(args.cfname)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ oprot.writeMessageBegin("truncate", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_add(self, seqid, iprot, oprot):
+ args = add_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = add_result()
+ try:
+ self._handler.add(args.key, args.column_parent, args.column, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("add", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_batch_add(self, seqid, iprot, oprot):
+ args = batch_add_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = batch_add_result()
+ try:
+ self._handler.batch_add(args.update_map, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("batch_add", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_get_counter(self, seqid, iprot, oprot):
+ args = get_counter_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_counter_result()
+ try:
+ result.success = self._handler.get_counter(args.key, args.path, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except NotFoundException, nfe:
+ result.nfe = nfe
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("get_counter", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_get_counter_slice(self, seqid, iprot, oprot):
+ args = get_counter_slice_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = get_counter_slice_result()
+ try:
+ result.success = self._handler.get_counter_slice(args.key, args.column_parent, args.predicate, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("get_counter_slice", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for multiget_counter_slice: same shape as
+ # get_counter_slice but takes a list of keys; traps ire/ue/te.
+ def process_multiget_counter_slice(self, seqid, iprot, oprot):
+ args = multiget_counter_slice_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = multiget_counter_slice_result()
+ try:
+ result.success = self._handler.multiget_counter_slice(args.keys, args.column_parent, args.predicate, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("multiget_counter_slice", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for remove_counter (void): decode (key, path,
+ # consistency_level), call the handler, trap ire/ue/te, reply.
+ def process_remove_counter(self, seqid, iprot, oprot):
+ args = remove_counter_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = remove_counter_result()
+ try:
+ self._handler.remove_counter(args.key, args.path, args.consistency_level)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("remove_counter", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_schema_versions (no args used); only
+ # InvalidRequestException is declared, so only ire is trapped.
+ def process_describe_schema_versions(self, seqid, iprot, oprot):
+ args = describe_schema_versions_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_schema_versions_result()
+ try:
+ result.success = self._handler.describe_schema_versions()
+ except InvalidRequestException, ire:
+ result.ire = ire
+ oprot.writeMessageBegin("describe_schema_versions", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_keyspaces (no args used); traps only ire.
+ def process_describe_keyspaces(self, seqid, iprot, oprot):
+ args = describe_keyspaces_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_keyspaces_result()
+ try:
+ result.success = self._handler.describe_keyspaces()
+ except InvalidRequestException, ire:
+ result.ire = ire
+ oprot.writeMessageBegin("describe_keyspaces", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_cluster_name; the IDL declares no
+ # exceptions for this call, hence the unguarded handler invocation.
+ def process_describe_cluster_name(self, seqid, iprot, oprot):
+ args = describe_cluster_name_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_cluster_name_result()
+ result.success = self._handler.describe_cluster_name()
+ oprot.writeMessageBegin("describe_cluster_name", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_version; no declared exceptions, so the
+ # handler call is unguarded.
+ def process_describe_version(self, seqid, iprot, oprot):
+ args = describe_version_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_version_result()
+ result.success = self._handler.describe_version()
+ oprot.writeMessageBegin("describe_version", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_ring(keyspace); traps only ire.
+ def process_describe_ring(self, seqid, iprot, oprot):
+ args = describe_ring_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_ring_result()
+ try:
+ result.success = self._handler.describe_ring(args.keyspace)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ oprot.writeMessageBegin("describe_ring", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_partitioner; no declared exceptions.
+ def process_describe_partitioner(self, seqid, iprot, oprot):
+ args = describe_partitioner_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_partitioner_result()
+ result.success = self._handler.describe_partitioner()
+ oprot.writeMessageBegin("describe_partitioner", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_snitch; no declared exceptions.
+ def process_describe_snitch(self, seqid, iprot, oprot):
+ args = describe_snitch_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_snitch_result()
+ result.success = self._handler.describe_snitch()
+ oprot.writeMessageBegin("describe_snitch", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_keyspace(keyspace); traps nfe and ire
+ # (NotFoundException checked first, mirroring the IDL declaration order).
+ def process_describe_keyspace(self, seqid, iprot, oprot):
+ args = describe_keyspace_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_keyspace_result()
+ try:
+ result.success = self._handler.describe_keyspace(args.keyspace)
+ except NotFoundException, nfe:
+ result.nfe = nfe
+ except InvalidRequestException, ire:
+ result.ire = ire
+ oprot.writeMessageBegin("describe_keyspace", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for describe_splits(cfName, start_token, end_token,
+ # keys_per_split); traps only ire.
+ def process_describe_splits(self, seqid, iprot, oprot):
+ args = describe_splits_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = describe_splits_result()
+ try:
+ result.success = self._handler.describe_splits(args.cfName, args.start_token, args.end_token, args.keys_per_split)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ oprot.writeMessageBegin("describe_splits", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for system_add_column_family(cf_def); schema-mutating
+ # call, so it traps ire and SchemaDisagreementException (sde).
+ def process_system_add_column_family(self, seqid, iprot, oprot):
+ args = system_add_column_family_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = system_add_column_family_result()
+ try:
+ result.success = self._handler.system_add_column_family(args.cf_def)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("system_add_column_family", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for system_drop_column_family(column_family); traps
+ # ire and sde like the other schema-mutating calls.
+ def process_system_drop_column_family(self, seqid, iprot, oprot):
+ args = system_drop_column_family_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = system_drop_column_family_result()
+ try:
+ result.success = self._handler.system_drop_column_family(args.column_family)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("system_drop_column_family", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for system_add_keyspace(ks_def); traps ire and sde.
+ def process_system_add_keyspace(self, seqid, iprot, oprot):
+ args = system_add_keyspace_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = system_add_keyspace_result()
+ try:
+ result.success = self._handler.system_add_keyspace(args.ks_def)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("system_add_keyspace", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for system_drop_keyspace(keyspace); traps ire and sde.
+ def process_system_drop_keyspace(self, seqid, iprot, oprot):
+ args = system_drop_keyspace_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = system_drop_keyspace_result()
+ try:
+ result.success = self._handler.system_drop_keyspace(args.keyspace)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("system_drop_keyspace", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for system_update_keyspace(ks_def); traps ire and sde.
+ def process_system_update_keyspace(self, seqid, iprot, oprot):
+ args = system_update_keyspace_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = system_update_keyspace_result()
+ try:
+ result.success = self._handler.system_update_keyspace(args.ks_def)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("system_update_keyspace", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for system_update_column_family(cf_def); traps ire and sde.
+ def process_system_update_column_family(self, seqid, iprot, oprot):
+ args = system_update_column_family_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = system_update_column_family_result()
+ try:
+ result.success = self._handler.system_update_column_family(args.cf_def)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("system_update_column_family", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ # Dispatch stub for execute_cql_query(query, compression); the widest
+ # exception surface of the service — traps ire, ue, te and sde.
+ def process_execute_cql_query(self, seqid, iprot, oprot):
+ args = execute_cql_query_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = execute_cql_query_result()
+ try:
+ result.success = self._handler.execute_cql_query(args.query, args.compression)
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("execute_cql_query", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+
+# HELPER FUNCTIONS AND STRUCTURES
+
+ # Thrift argument struct for Cassandra.login. Field 1 (required, enforced
+ # by validate()): auth_request, an AuthenticationRequest struct.
+ # read()/write() use the C-accelerated fastbinary codec when the protocol
+ # and transport support it, else fall back to a field-by-field loop.
+ # NOTE(review): Python 2-only idioms throughout (`except E, e`, iteritems);
+ # regenerate with a newer Thrift compiler rather than editing by hand.
+ class login_args:
+ """
+ Attributes:
+ - auth_request
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'auth_request', (AuthenticationRequest, AuthenticationRequest.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, auth_request=None,):
+ self.auth_request = auth_request
+
+ def read(self, iprot):
+ # Fast path: decode in C if fastbinary is importable and applicable.
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.auth_request = AuthenticationRequest()
+ self.auth_request.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ # Unknown field ids are skipped for forward compatibility.
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('login_args')
+ if self.auth_request != None:
+ oprot.writeFieldBegin('auth_request', TType.STRUCT, 1)
+ self.auth_request.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ if self.auth_request is None:
+ raise TProtocol.TProtocolException(message='Required field auth_request is unset!')
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ # Thrift result struct for Cassandra.login (a void call): no success
+ # field, only the two declared exception slots — authnx
+ # (AuthenticationException, field 1) and authzx (AuthorizationException,
+ # field 2). Both are optional; validate() enforces nothing.
+ class login_result:
+ """
+ Attributes:
+ - authnx
+ - authzx
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'authnx', (AuthenticationException, AuthenticationException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'authzx', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
+ )
+
+ def __init__(self, authnx=None, authzx=None,):
+ self.authnx = authnx
+ self.authzx = authzx
+
+ def read(self, iprot):
+ # Fast path: C-accelerated decode when protocol/transport allow it.
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.authnx = AuthenticationException()
+ self.authnx.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.authzx = AuthorizationException()
+ self.authzx.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('login_result')
+ if self.authnx != None:
+ oprot.writeFieldBegin('authnx', TType.STRUCT, 1)
+ self.authnx.write(oprot)
+ oprot.writeFieldEnd()
+ if self.authzx != None:
+ oprot.writeFieldBegin('authzx', TType.STRUCT, 2)
+ self.authzx.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ # Thrift argument struct for Cassandra.set_keyspace. Field 1 (required,
+ # enforced by validate()): keyspace, a string.
+ class set_keyspace_args:
+ """
+ Attributes:
+ - keyspace
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'keyspace', None, None, ), # 1
+ )
+
+ def __init__(self, keyspace=None,):
+ self.keyspace = keyspace
+
+ def read(self, iprot):
+ # Fast path: C-accelerated decode when protocol/transport allow it.
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.keyspace = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('set_keyspace_args')
+ if self.keyspace != None:
+ oprot.writeFieldBegin('keyspace', TType.STRING, 1)
+ oprot.writeString(self.keyspace)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ if self.keyspace is None:
+ raise TProtocol.TProtocolException(message='Required field keyspace is unset!')
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ # Thrift result struct for Cassandra.set_keyspace (a void call): no
+ # success field, only the declared exception slot ire
+ # (InvalidRequestException, field 1). Optional; validate() enforces nothing.
+ class set_keyspace_result:
+ """
+ Attributes:
+ - ire
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, ire=None,):
+ self.ire = ire
+
+ def read(self, iprot):
+ # Fast path: C-accelerated decode when protocol/transport allow it.
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.ire = InvalidRequestException()
+ self.ire.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('set_keyspace_result')
+ if self.ire != None:
+ oprot.writeFieldBegin('ire', TType.STRUCT, 1)
+ self.ire.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class get_args:
+ """
+ Attributes:
+ - key
+ - column_path
+ - consistency_level
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'key', None, None, ), # 1
+ (2, TType.STRUCT, 'column_path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
+ (3, TType.I32, 'consistency_level', None, 1, ), # 3
+ )
+
+ def __init__(self, key=None, column_path=None, consistency_level=thrift_spec[3][4],):
+ self.key = key
+ self.column_path = column_path
+ self.consistency_level = consistency_level
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.key = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.column_path = ColumnPath()
+ self.column_path.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.consistency_level = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('get_args')
+ if self.key != None:
+ oprot.writeFieldBegin('key', TType.STRING, 1)
+ oprot.writeString(self.key)
+ oprot.writeFieldEnd()
+ if self.column_path != None:
+ oprot.writeFieldBegin('column_path', TType.STRUCT, 2)
+ self.column_path.write(oprot)
+ oprot.writeFieldEnd()
+ if self.consistency_level != None:
[... 5500 lines stripped ...]