You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by ha...@apache.org on 2023/01/10 07:06:49 UTC
[iotdb] 01/02: [To rel/1.0] Separate SYNC_CONNECTION_ERROR and DISPATCH_ERROR
This is an automated email from the ASF dual-hosted git repository.
haonan pushed a commit to branch sync_connection
in repository https://gitbox.apache.org/repos/asf/iotdb.git
commit 51f66026b410c524134295341584b8f36b35f70f
Author: Haonan <hh...@outlook.com>
AuthorDate: Tue Jan 10 14:12:00 2023 +0800
[To rel/1.0] Separate SYNC_CONNECTION_ERROR and DISPATCH_ERROR
---
docs/UserGuide/Reference/Status-Codes.md | 234 +-
docs/zh/UserGuide/Reference/Status-Codes.md | 235 +-
mlnode/LICENSE | 289 +
mlnode/iotdb/thrift/__init__.py | 0
mlnode/iotdb/thrift/common/__init__.py | 1 +
mlnode/iotdb/thrift/common/constants.py | 14 +
mlnode/iotdb/thrift/common/ttypes.py | 1345 ++
.../thrift/confignode/IConfigNodeRPCService-remote | 733 +
.../thrift/confignode/IConfigNodeRPCService.py | 16798 +++++++++++++++++++
mlnode/iotdb/thrift/confignode/__init__.py | 1 +
mlnode/iotdb/thrift/confignode/constants.py | 14 +
mlnode/iotdb/thrift/confignode/ttypes.py | 10257 +++++++++++
.../thrift/datanode/IDataNodeRPCService-remote | 523 +
.../iotdb/thrift/datanode/IDataNodeRPCService.py | 11223 +++++++++++++
.../thrift/datanode/MPPDataExchangeService-remote | 138 +
.../thrift/datanode/MPPDataExchangeService.py | 740 +
mlnode/iotdb/thrift/datanode/__init__.py | 1 +
mlnode/iotdb/thrift/datanode/constants.py | 14 +
mlnode/iotdb/thrift/datanode/ttypes.py | 6208 +++++++
.../iotdb/thrift/mlnode/IMLNodeRPCService-remote | 131 +
mlnode/iotdb/thrift/mlnode/IMLNodeRPCService.py | 617 +
mlnode/iotdb/thrift/mlnode/__init__.py | 1 +
mlnode/iotdb/thrift/mlnode/constants.py | 14 +
mlnode/iotdb/thrift/mlnode/ttypes.py | 385 +
.../apache/iotdb/commons/utils/StatusUtils.java | 4 +-
.../db/mpp/plan/scheduler/ClusterScheduler.java | 2 +-
.../scheduler/FragmentInstanceDispatcherImpl.java | 2 +-
.../scheduler/load/LoadTsFileDispatcherImpl.java | 4 +-
.../java/org/apache/iotdb/rpc/TSStatusCode.java | 1 +
29 files changed, 49691 insertions(+), 238 deletions(-)
diff --git a/docs/UserGuide/Reference/Status-Codes.md b/docs/UserGuide/Reference/Status-Codes.md
index 9d26153bfe..d74a796f87 100644
--- a/docs/UserGuide/Reference/Status-Codes.md
+++ b/docs/UserGuide/Reference/Status-Codes.md
@@ -42,122 +42,124 @@ With Status Code, instead of writing codes like `if (e.getErrorMessage().contain
Here is a list of Status Code and related message:
-|Status Code|Status Type|Meanings|
-|:--|:---|:---|
-|200|SUCCESS_STATUS||
-|201|INCOMPATIBLE_VERSION|Incompatible version|
-|202|CONFIGURATION_ERROR|Configuration error|
-|203|START_UP_ERROR|Meet error while starting|
-|204|SHUT_DOWN_ERROR|Meet error while shutdown|
-|300|UNSUPPORTED_OPERATION|Unsupported operation|
-|301|EXECUTE_STATEMENT_ERROR|Execute statement error|
-|302|MULTIPLE_ERROR|Meet error when executing multiple statements|
-|303|ILLEGAL_PARAMETER|Parameter is illegal|
-|304|OVERLAP_WITH_EXISTING_TASK|Current task has some conflict with existing tasks|
-|305|INTERNAL_SERVER_ERROR|Internal server error|
-|400|REDIRECTION_RECOMMEND|Recommend Client redirection|
-|500|DATABASE_NOT_EXIST|Database does not exist|
-|501|DATABASE_ALREADY_EXISTS|Database already exist|
-|502|SERIES_OVERFLOW|Series number exceeds the threshold|
-|503|TIMESERIES_ALREADY_EXIST|Timeseries already exists|
-|504|TIMESERIES_IN_BLACK_LIST|Timeseries is being deleted|
-|505|ALIAS_ALREADY_EXIST|Alias already exists|
-|506|PATH_ALREADY_EXIST|Path already exists|
-|507|METADATA_ERROR|Meet error when dealing with metadata|
-|508|PATH_NOT_EXIST|Path does not exist|
-|509|ILLEGAL_PATH|Illegal path|
-|510|CREATE_TEMPLATE_ERROR|Create schema template error|
-|511|DUPLICATED_TEMPLATE|Schema template is duplicated|
-|512|UNDEFINED_TEMPLATE|Schema template is not defined|
-|513|TEMPLATE_NOT_SET|Schema template is not set|
-|514|DIFFERENT_TEMPLATE|Template is not consistent|
-|515|TEMPLATE_IS_IN_USE|Template is in use|
-|516|TEMPLATE_INCOMPATIBLE|Template is not compatible|
-|517|SEGMENT_NOT_FOUND|Segment not found|
-|518|PAGE_OUT_OF_SPACE|No enough space on schema page|
-|519|RECORD_DUPLICATED|Record is duplicated|
-|520|SEGMENT_OUT_OF_SPACE|No enough space on schema segment|
-|521|SCHEMA_FILE_NOT_EXISTS|SchemaFile does not exist|
-|522|OVERSIZE_RECORD|Size of record exceeds the threshold of page of SchemaFile|
-|523|SCHEMA_FILE_REDO_LOG_BROKEN|SchemaFile redo log has broken|
-|524|TEMPLATE_NOT_ACTIVATED|Schema template is not activated|
-|600|SYSTEM_READ_ONLY|IoTDB system is read only|
-|601|STORAGE_ENGINE_ERROR|Storage engine related error|
-|602|STORAGE_ENGINE_NOT_READY|The storage engine is in recovery, not ready fore accepting read/write operation|
-|603|DATAREGION_PROCESS_ERROR|DataRegion related error|
-|604|TSFILE_PROCESSOR_ERROR|TsFile processor related error|
-|605|WRITE_PROCESS_ERROR|Writing data related error|
-|606|WRITE_PROCESS_REJECT|Writing data rejected error|
-|607|OUT_OF_TTL|Insertion time is less than TTL time bound|
-|608|COMPACTION_ERROR|Meet error while merging|
-|609|ALIGNED_TIMESERIES_ERROR|Meet error in aligned timeseries|
-|610|WAL_ERROR|WAL error|
-|611|DISK_SPACE_INSUFFICIENT|Disk space is insufficient|
-|700|SQL_PARSE_ERROR|Meet error while parsing SQL|
-|701|SEMANTIC_ERROR|SQL semantic error|
-|702|GENERATE_TIME_ZONE_ERROR|Meet error while generating time zone|
-|703|SET_TIME_ZONE_ERROR|Meet error while setting time zone|
-|704|QUERY_NOT_ALLOWED|Query statements are not allowed error|
-|705|LOGICAL_OPERATOR_ERROR|Logical operator related error|
-|706|LOGICAL_OPTIMIZE_ERROR|Logical optimize related error|
-|707|UNSUPPORTED_FILL_TYPE|Unsupported fill type related error|
-|708|QUERY_PROCESS_ERROR|Query process related error|
-|709|MPP_MEMORY_NOT_ENOUGH|Not enough memory for task execution in MPP|
-|710|CLOSE_OPERATION_ERROR|Meet error in close operation|
-|711|TSBLOCK_SERIALIZE_ERROR|TsBlock serialization error|
-|712|INTERNAL_REQUEST_TIME_OUT|MPP Operation timeout|
-|713|INTERNAL_REQUEST_RETRY_ERROR|Internal operation retry failed|
-|800|AUTHENTICATION_ERROR|Error in authentication|
-|801|WRONG_LOGIN_PASSWORD|Username or password is wrong|
-|802|NOT_LOGIN|Has not logged in|
-|803|NO_PERMISSION|No permissions for this operation, please add privilege|
-|804|UNINITIALIZED_AUTH_ERROR|Uninitialized authorizer|
-|805|USER_NOT_EXIST|User does not exist|
-|806|ROLE_NOT_EXIST|Role does not exist|
-|807|CLEAR_PERMISSION_CACHE_ERROR|Error when clear the permission cache|
-|900|MIGRATE_REGION_ERROR|Error when migrate region|
-|901|CREATE_REGION_ERROR|Create region error|
-|902|DELETE_REGION_ERROR|Delete region error|
-|903|PARTITION_CACHE_UPDATE_ERROR|Update partition cache failed|
-|904|CONSENSUS_NOT_INITIALIZED|Consensus is not initialized and cannot provide service|
-|905|REGION_LEADER_CHANGE_ERROR|Region leader migration failed|
-|906|NO_AVAILABLE_REGION_GROUP|Cannot find an available region group|
-|1000|DATANODE_ALREADY_REGISTERED|DataNode already registered in cluster|
-|1001|NO_ENOUGH_DATANODE|The number of DataNode is not enough, cannot remove DataNode or create enough replication|
-|1002|ADD_CONFIGNODE_ERROR|Add ConfigNode error|
-|1003|REMOVE_CONFIGNODE_ERROR|Remove ConfigNode error|
-|1004|DATANODE_NOT_EXIST|DataNode not exist error|
-|1005|DATANODE_STOP_ERROR|DataNode stop error|
-|1006|REMOVE_DATANODE_ERROR|Remove datanode failed|
-|1007|REGISTER_DATANODE_WITH_WRONG_ID|The DataNode to be registered has incorrect register id|
-|1008|CAN_NOT_CONNECT_DATANODE|Can not connect to DataNode|
-|1100|LOAD_FILE_ERROR|Meet error while loading file|
-|1101|LOAD_PIECE_OF_TSFILE_ERROR|Error when load a piece of TsFile when loading|
-|1102|DESERIALIZE_PIECE_OF_TSFILE_ERROR|Error when deserialize a piece of TsFile|
-|1103|CREATE_PIPE_SINK_ERROR|Failed to create a PIPE sink|
-|1104|PIPE_ERROR|PIPE error|
-|1105|PIPESERVER_ERROR|PIPE server error|
-|1106|SYNC_CONNECTION_ERROR|Meet error while sync connecting|
-|1107|SYNC_FILE_REDIRECTION_ERROR|Sync TsFile redirection error|
-|1108|SYNC_FILE_ERROR|Sync TsFile error|
-|1109|VERIFY_METADATA_ERROR|Meet error in validate timeseries schema|
-|1200|UDF_LOAD_CLASS_ERROR|Error when loading UDF class|
-|1201|UDF_DOWNLOAD_ERROR|DataNode cannot download UDF from ConfigNode|
-|1202|CREATE_UDF_ON_DATANODE_ERROR|Error when create UDF on DataNode|
-|1203|DROP_UDF_ON_DATANODE_ERROR|Error when drop a UDF on DataNode|
-|1300|CREATE_TRIGGER_ERROR|ConfigNode create trigger error|
-|1301|DROP_TRIGGER_ERROR|ConfigNode delete Trigger error|
-|1302|TRIGGER_FIRE_ERROR|Error when firing trigger|
-|1303|TRIGGER_LOAD_CLASS_ERROR|Error when load class of trigger|
-|1304|TRIGGER_DOWNLOAD_ERROR|Error when download trigger from ConfigNode|
-|1305|CREATE_TRIGGER_INSTANCE_ERROR|Error when create trigger instance|
-|1306|ACTIVE_TRIGGER_INSTANCE_ERROR|Error when activate trigger instance|
-|1307|DROP_TRIGGER_INSTANCE_ERROR|Error when drop trigger instance|
-|1308|UPDATE_TRIGGER_LOCATION_ERROR|Error when move stateful trigger to new datanode|
-|1400|NO_SUCH_CQ|CQ task does not exist|
-|1401|CQ_ALREADY_ACTIVE|CQ is already active|
-|1402|CQ_AlREADY_EXIST|CQ is already exist|
-|1403|CQ_UPDATE_LAST_EXEC_TIME_ERROR|CQ update last execution time failed|
+
+| Status Code | Status Type | Meanings |
+|:------------|:----------------------------------|:------------------------------------------------------------------------------------------|
+| 200 | SUCCESS_STATUS | |
+| 201 | INCOMPATIBLE_VERSION | Incompatible version |
+| 202 | CONFIGURATION_ERROR | Configuration error |
+| 203 | START_UP_ERROR | Meet error while starting |
+| 204 | SHUT_DOWN_ERROR | Meet error while shutdown |
+| 300 | UNSUPPORTED_OPERATION | Unsupported operation |
+| 301 | EXECUTE_STATEMENT_ERROR | Execute statement error |
+| 302 | MULTIPLE_ERROR | Meet error when executing multiple statements |
+| 303 | ILLEGAL_PARAMETER | Parameter is illegal |
+| 304 | OVERLAP_WITH_EXISTING_TASK | Current task has some conflict with existing tasks |
+| 305 | INTERNAL_SERVER_ERROR | Internal server error |
+| 306 | DISPATCH_ERROR | Meet error while dispatching |
+| 400 | REDIRECTION_RECOMMEND | Recommend Client redirection |
+| 500 | DATABASE_NOT_EXIST | Database does not exist |
+| 501 | DATABASE_ALREADY_EXISTS | Database already exist |
+| 502 | SERIES_OVERFLOW | Series number exceeds the threshold |
+| 503 | TIMESERIES_ALREADY_EXIST | Timeseries already exists |
+| 504 | TIMESERIES_IN_BLACK_LIST | Timeseries is being deleted |
+| 505 | ALIAS_ALREADY_EXIST | Alias already exists |
+| 506 | PATH_ALREADY_EXIST | Path already exists |
+| 507 | METADATA_ERROR | Meet error when dealing with metadata |
+| 508 | PATH_NOT_EXIST | Path does not exist |
+| 509 | ILLEGAL_PATH | Illegal path |
+| 510 | CREATE_TEMPLATE_ERROR | Create schema template error |
+| 511 | DUPLICATED_TEMPLATE | Schema template is duplicated |
+| 512 | UNDEFINED_TEMPLATE | Schema template is not defined |
+| 513 | TEMPLATE_NOT_SET | Schema template is not set |
+| 514 | DIFFERENT_TEMPLATE | Template is not consistent |
+| 515 | TEMPLATE_IS_IN_USE | Template is in use |
+| 516 | TEMPLATE_INCOMPATIBLE | Template is not compatible |
+| 517 | SEGMENT_NOT_FOUND | Segment not found |
+| 518 | PAGE_OUT_OF_SPACE | No enough space on schema page |
+| 519 | RECORD_DUPLICATED | Record is duplicated |
+| 520 | SEGMENT_OUT_OF_SPACE | No enough space on schema segment |
+| 521 | SCHEMA_FILE_NOT_EXISTS | SchemaFile does not exist |
+| 522 | OVERSIZE_RECORD | Size of record exceeds the threshold of page of SchemaFile |
+| 523 | SCHEMA_FILE_REDO_LOG_BROKEN | SchemaFile redo log has broken |
+| 524 | TEMPLATE_NOT_ACTIVATED | Schema template is not activated |
+| 600 | SYSTEM_READ_ONLY | IoTDB system is read only |
+| 601 | STORAGE_ENGINE_ERROR | Storage engine related error |
+| 602         | STORAGE_ENGINE_NOT_READY          | The storage engine is in recovery, not ready for accepting read/write operation           |
+| 603 | DATAREGION_PROCESS_ERROR | DataRegion related error |
+| 604 | TSFILE_PROCESSOR_ERROR | TsFile processor related error |
+| 605 | WRITE_PROCESS_ERROR | Writing data related error |
+| 606 | WRITE_PROCESS_REJECT | Writing data rejected error |
+| 607 | OUT_OF_TTL | Insertion time is less than TTL time bound |
+| 608 | COMPACTION_ERROR | Meet error while merging |
+| 609 | ALIGNED_TIMESERIES_ERROR | Meet error in aligned timeseries |
+| 610 | WAL_ERROR | WAL error |
+| 611 | DISK_SPACE_INSUFFICIENT | Disk space is insufficient |
+| 700 | SQL_PARSE_ERROR | Meet error while parsing SQL |
+| 701 | SEMANTIC_ERROR | SQL semantic error |
+| 702 | GENERATE_TIME_ZONE_ERROR | Meet error while generating time zone |
+| 703 | SET_TIME_ZONE_ERROR | Meet error while setting time zone |
+| 704 | QUERY_NOT_ALLOWED | Query statements are not allowed error |
+| 705 | LOGICAL_OPERATOR_ERROR | Logical operator related error |
+| 706 | LOGICAL_OPTIMIZE_ERROR | Logical optimize related error |
+| 707 | UNSUPPORTED_FILL_TYPE | Unsupported fill type related error |
+| 708 | QUERY_PROCESS_ERROR | Query process related error |
+| 709 | MPP_MEMORY_NOT_ENOUGH | Not enough memory for task execution in MPP |
+| 710 | CLOSE_OPERATION_ERROR | Meet error in close operation |
+| 711 | TSBLOCK_SERIALIZE_ERROR | TsBlock serialization error |
+| 712 | INTERNAL_REQUEST_TIME_OUT | MPP Operation timeout |
+| 713 | INTERNAL_REQUEST_RETRY_ERROR | Internal operation retry failed |
+| 800 | UNINITIALIZED_AUTH_ERROR | Failed to initialize auth module |
+| 801 | WRONG_LOGIN_PASSWORD | Username or password is wrong |
+| 802         | NOT_LOGIN                         | Not logged in                                                                             |
+| 803         | NO_PERMISSION                     | No permission to operate                                                                  |
+| 804         | USER_NOT_EXIST                    | User does not exist                                                                       |
+| 805 | USER_ALREADY_EXIST | User already exists |
+| 806 | USER_ALREADY_HAS_ROLE | User already has target role |
+| 807         | USER_NOT_HAS_ROLE                 | User does not have target role                                                            |
+| 900 | MIGRATE_REGION_ERROR | Error when migrate region |
+| 901 | CREATE_REGION_ERROR | Create region error |
+| 902 | DELETE_REGION_ERROR | Delete region error |
+| 903 | PARTITION_CACHE_UPDATE_ERROR | Update partition cache failed |
+| 904 | CONSENSUS_NOT_INITIALIZED | Consensus is not initialized and cannot provide service |
+| 905 | REGION_LEADER_CHANGE_ERROR | Region leader migration failed |
+| 906 | NO_AVAILABLE_REGION_GROUP | Cannot find an available region group |
+| 1000 | DATANODE_ALREADY_REGISTERED | DataNode already registered in cluster |
+| 1001 | NO_ENOUGH_DATANODE | The number of DataNode is not enough, cannot remove DataNode or create enough replication |
+| 1002 | ADD_CONFIGNODE_ERROR | Add ConfigNode error |
+| 1003 | REMOVE_CONFIGNODE_ERROR | Remove ConfigNode error |
+| 1004 | DATANODE_NOT_EXIST | DataNode not exist error |
+| 1005 | DATANODE_STOP_ERROR | DataNode stop error |
+| 1006 | REMOVE_DATANODE_ERROR | Remove datanode failed |
+| 1007 | REGISTER_DATANODE_WITH_WRONG_ID | The DataNode to be registered has incorrect register id |
+| 1008 | CAN_NOT_CONNECT_DATANODE | Can not connect to DataNode |
+| 1100 | LOAD_FILE_ERROR | Meet error while loading file |
+| 1101 | LOAD_PIECE_OF_TSFILE_ERROR | Error when load a piece of TsFile when loading |
+| 1102 | DESERIALIZE_PIECE_OF_TSFILE_ERROR | Error when deserialize a piece of TsFile |
+| 1103 | SYNC_CONNECTION_ERROR | Sync connection error |
+| 1104 | SYNC_FILE_REDIRECTION_ERROR | Sync TsFile redirection error |
+| 1105 | SYNC_FILE_ERROR | Sync TsFile error |
+| 1106 | CREATE_PIPE_SINK_ERROR | Failed to create a PIPE sink |
+| 1107 | PIPE_ERROR | PIPE error |
+| 1108 | PIPESERVER_ERROR | PIPE server error |
+| 1109 | VERIFY_METADATA_ERROR | Meet error in validate timeseries schema |
+| 1200 | UDF_LOAD_CLASS_ERROR | Error when loading UDF class |
+| 1201 | UDF_DOWNLOAD_ERROR | DataNode cannot download UDF from ConfigNode |
+| 1202 | CREATE_UDF_ON_DATANODE_ERROR | Error when create UDF on DataNode |
+| 1203 | DROP_UDF_ON_DATANODE_ERROR | Error when drop a UDF on DataNode |
+| 1300 | CREATE_TRIGGER_ERROR | ConfigNode create trigger error |
+| 1301 | DROP_TRIGGER_ERROR | ConfigNode delete Trigger error |
+| 1302 | TRIGGER_FIRE_ERROR | Error when firing trigger |
+| 1303 | TRIGGER_LOAD_CLASS_ERROR | Error when load class of trigger |
+| 1304 | TRIGGER_DOWNLOAD_ERROR | Error when download trigger from ConfigNode |
+| 1305 | CREATE_TRIGGER_INSTANCE_ERROR | Error when create trigger instance |
+| 1306 | ACTIVE_TRIGGER_INSTANCE_ERROR | Error when activate trigger instance |
+| 1307 | DROP_TRIGGER_INSTANCE_ERROR | Error when drop trigger instance |
+| 1308 | UPDATE_TRIGGER_LOCATION_ERROR | Error when move stateful trigger to new datanode |
+| 1400 | NO_SUCH_CQ | CQ task does not exist |
+| 1401 | CQ_ALREADY_ACTIVE | CQ is already active |
+| 1402        | CQ_AlREADY_EXIST                  | CQ already exists                                                                         |
+| 1403 | CQ_UPDATE_LAST_EXEC_TIME_ERROR | CQ update last execution time failed |
> All exceptions are refactored in the latest version by extracting uniform message into exception classes. Different error codes are added to all exceptions. When an exception is caught and a higher-level exception is thrown, the error code will keep and pass so that users will know the detailed error reason.
A base exception class "ProcessException" is also added to be extended by all exceptions.
diff --git a/docs/zh/UserGuide/Reference/Status-Codes.md b/docs/zh/UserGuide/Reference/Status-Codes.md
index b8ccebbd02..3be5db9e39 100644
--- a/docs/zh/UserGuide/Reference/Status-Codes.md
+++ b/docs/zh/UserGuide/Reference/Status-Codes.md
@@ -43,122 +43,125 @@ try {
这里是状态码和相对应信息的列表:
-|状态码|状态类型|状态信息|
-|:--|:---|:---|
-|200|SUCCESS_STATUS|成功状态|
-|201|INCOMPATIBLE_VERSION|版本不兼容|
-|202|CONFIGURATION_ERROR|配置文件有错误项|
-|203|START_UP_ERROR|启动错误|
-|204|SHUT_DOWN_ERROR|关机错误|
-|300|UNSUPPORTED_OPERATION|不支持的操作|
-|301|EXECUTE_STATEMENT_ERROR|执行语句错误|
-|302|MULTIPLE_ERROR|多行语句执行错误|
-|303|ILLEGAL_PARAMETER|参数错误|
-|304|OVERLAP_WITH_EXISTING_TASK|与正在执行的其他操作冲突|
-|305|INTERNAL_SERVER_ERROR|服务器内部错误|
-|400|REDIRECTION_RECOMMEND|推荐客户端重定向|
-|500|DATABASE_NOT_EXIST|数据库不存在|
-|501|DATABASE_ALREADY_EXISTS|数据库已存在|
-|502|SERIES_OVERFLOW|序列数量超过阈值|
-|503|TIMESERIES_ALREADY_EXIST|时间序列已存在|
-|504|TIMESERIES_IN_BLACK_LIST|时间序列正在删除|
-|505|ALIAS_ALREADY_EXIST|路径别名已经存在|
-|506|PATH_ALREADY_EXIST|路径已经存在|
-|507|METADATA_ERROR|处理元数据错误|
-|508|PATH_NOT_EXIST|路径不存在|
-|509|ILLEGAL_PATH|路径不合法|
-|510|CREATE_TEMPLATE_ERROR|创建物理量模板失败|
-|511|DUPLICATED_TEMPLATE|元数据模板重复|
-|512|UNDEFINED_TEMPLATE|元数据模板未定义|
-|513|TEMPLATE_NOT_SET|元数据模板未设置|
-|514|DIFFERENT_TEMPLATE|元数据模板不一致|
-|515|TEMPLATE_IS_IN_USE|元数据模板正在使用|
-|516|TEMPLATE_INCOMPATIBLE|元数据模板不兼容|
-|517|SEGMENT_NOT_FOUND|未找到 Segment|
-|518|PAGE_OUT_OF_SPACE|SchemaFile 中 Page 空间不够|
-|519|RECORD_DUPLICATED|记录重复|
-|520|SEGMENT_OUT_OF_SPACE|SchemaFile 中 segment 空间不够|
-|521|SCHEMA_FILE_NOT_EXISTS|SchemaFile 不存在|
-|522|OVERSIZE_RECORD|记录大小超过元数据文件页面大小|
-|523|SCHEMA_FILE_REDO_LOG_BROKEN|SchemaFile 的 redo 日志损坏|
-|524|TEMPLATE_NOT_ACTIVATED|元数据模板未激活|
-|600|SYSTEM_READ_ONLY|IoTDB 系统只读|
-|601|STORAGE_ENGINE_ERROR|存储引擎相关错误|
-|602|STORAGE_ENGINE_NOT_READY|存储引擎还在恢复中,还不能接受读写操作|
-|603|DATAREGION_PROCESS_ERROR|DataRegion 相关错误|
-|604|TSFILE_PROCESSOR_ERROR|TsFile 处理器相关错误|
-|605|WRITE_PROCESS_ERROR|写入相关错误|
-|606|WRITE_PROCESS_REJECT|写入拒绝错误|
-|607|OUT_OF_TTL|插入时间少于 TTL 时间边界|
-|608|COMPACTION_ERROR|合并错误|
-|609|ALIGNED_TIMESERIES_ERROR|对齐时间序列错误|
-|610|WAL_ERROR|WAL 异常|
-|611|DISK_SPACE_INSUFFICIENT|磁盘空间不足|
-|700|SQL_PARSE_ERROR|SQL 语句分析错误|
-|701|SEMANTIC_ERROR|SQL 语义错误|
-|702|GENERATE_TIME_ZONE_ERROR|生成时区错误|
-|703|SET_TIME_ZONE_ERROR|设置时区错误|
-|704|QUERY_NOT_ALLOWED|查询语句不允许|
-|705|LOGICAL_OPERATOR_ERROR|逻辑符相关错误|
-|706|LOGICAL_OPTIMIZE_ERROR|逻辑优化相关错误|
-|707|UNSUPPORTED_FILL_TYPE|不支持的填充类型|
-|708|QUERY_PROCESS_ERROR|查询处理相关错误|
-|709|MPP_MEMORY_NOT_ENOUGH|MPP 框架中任务执行内存不足|
-|710|CLOSE_OPERATION_ERROR|关闭操作错误|
-|711|TSBLOCK_SERIALIZE_ERROR|TsBlock 序列化错误|
-|712|INTERNAL_REQUEST_TIME_OUT|MPP 操作超时|
-|713|INTERNAL_REQUEST_RETRY_ERROR|内部操作重试失败|
-|800|AUTHENTICATION_ERROR|权限认证失败|
-|801|WRONG_LOGIN_PASSWORD|用户名或密码错误|
-|802|NOT_LOGIN|没有登录|
-|803|NO_PERMISSION|没有操作权限|
-|804|UNINITIALIZED_AUTH_ERROR|授权人未初始化|
-|805|USER_NOT_EXIST|用户不存在|
-|806|ROLE_NOT_EXIST|角色不存在|
-|807|CLEAR_PERMISSION_CACHE_ERROR|清空权限缓存失败|
-|900|MIGRATE_REGION_ERROR|Region 迁移失败|
-|901|CREATE_REGION_ERROR|创建 region 失败|
-|902|DELETE_REGION_ERROR|删除 region 失败|
-|903|PARTITION_CACHE_UPDATE_ERROR|更新分区缓存失败|
-|904|CONSENSUS_NOT_INITIALIZED|共识层未初始化,不能提供服务|
-|905|REGION_LEADER_CHANGE_ERROR|Region leader 迁移失败|
-|906|NO_AVAILABLE_REGION_GROUP|无法找到可用的 Region 副本组|
-|1000|DATANODE_ALREADY_REGISTERED|DataNode 在集群中已经注册|
-|1001|NO_ENOUGH_DATANODE|DataNode 数量不足,无法移除节点或创建副本|
-|1002|ADD_CONFIGNODE_ERROR|新增 ConfigNode 失败|
-|1003|REMOVE_CONFIGNODE_ERROR|移除 ConfigNode 失败|
-|1004|DATANODE_NOT_EXIST|此 DataNode 不存在|
-|1005|DATANODE_STOP_ERROR|DataNode 关闭失败|
-|1006|REMOVE_DATANODE_ERROR|移除 datanode 失败|
-|1007|REGISTER_DATANODE_WITH_WRONG_ID|注册的 DataNode 中有错误的注册id|
-|1008|CAN_NOT_CONNECT_DATANODE|连接 DataNode 失败|
-|1100|LOAD_FILE_ERROR|加载文件错误|
-|1101|LOAD_PIECE_OF_TSFILE_ERROR|加载 TsFile 片段异常|
-|1102|DESERIALIZE_PIECE_OF_TSFILE_ERROR|反序列化 TsFile 片段异常|
-|1103|SYNC_CONNECTION_ERROR|回传连接错误|
-|1104|SYNC_FILE_REDIRECTION_ERROR|同步文件时重定向异常|
-|1105|SYNC_FILE_ERROR|同步文件异常|
-|1106|CREATE_PIPE_SINK_ERROR|创建 PIPE Sink 失败|
-|1107|PIPE_ERROR|PIPE 异常|
-|1108|PIPESERVER_ERROR|PIPE server 异常|
-|1109|VERIFY_METADATA_ERROR|校验元数据失败|
-|1200|UDF_LOAD_CLASS_ERROR|UDF 加载类异常|
-|1201|UDF_DOWNLOAD_ERROR|无法从 ConfigNode 下载 UDF|
-|1202|CREATE_UDF_ON_DATANODE_ERROR|在 DataNode 创建 UDF 失败|
-|1203|DROP_UDF_ON_DATANODE_ERROR|在 DataNode 卸载 UDF 失败|
-|1300|CREATE_TRIGGER_ERROR|ConfigNode 创建 Trigger 失败|
-|1301|DROP_TRIGGER_ERROR|ConfigNode 删除 Trigger 失败|
-|1302|TRIGGER_FIRE_ERROR|触发器执行错误|
-|1303|TRIGGER_LOAD_CLASS_ERROR|触发器加载类异常|
-|1304|TRIGGER_DOWNLOAD_ERROR|从 ConfigNode 下载触发器异常|
-|1305|CREATE_TRIGGER_INSTANCE_ERROR|创建触发器实例异常|
-|1306|ACTIVE_TRIGGER_INSTANCE_ERROR|激活触发器实例异常|
-|1307|DROP_TRIGGER_INSTANCE_ERROR|删除触发器实例异常|
-|1308|UPDATE_TRIGGER_LOCATION_ERROR|更新有状态的触发器所在 DataNode 异常|
-|1400|NO_SUCH_CQ|CQ 任务不存在|
-|1401|CQ_ALREADY_ACTIVE|CQ 任务已激活|
-|1402|CQ_AlREADY_EXIST|CQ 任务已存在|
-|1403|CQ_UPDATE_LAST_EXEC_TIME_ERROR|CQ 更新上一次执行时间失败|
+| 状态码 | 状态类型 | 状态信息 |
+|:-----|:----------------------------------|:--------------------------|
+| 200 | SUCCESS_STATUS | 成功状态 |
+| 201 | INCOMPATIBLE_VERSION | 版本不兼容 |
+| 202 | CONFIGURATION_ERROR | 配置文件有错误项 |
+| 203 | START_UP_ERROR | 启动错误 |
+| 204 | SHUT_DOWN_ERROR | 关机错误 |
+| 300 | UNSUPPORTED_OPERATION | 不支持的操作 |
+| 301 | EXECUTE_STATEMENT_ERROR | 执行语句错误 |
+| 302 | MULTIPLE_ERROR | 多行语句执行错误 |
+| 303 | ILLEGAL_PARAMETER | 参数错误 |
+| 304 | OVERLAP_WITH_EXISTING_TASK | 与正在执行的其他操作冲突 |
+| 305 | INTERNAL_SERVER_ERROR | 服务器内部错误 |
+| 306 | DISPATCH_ERROR | 分发错误 |
+| 400 | REDIRECTION_RECOMMEND | 推荐客户端重定向 |
+| 500 | DATABASE_NOT_EXIST | 数据库不存在 |
+| 501 | DATABASE_ALREADY_EXISTS | 数据库已存在 |
+| 502 | SERIES_OVERFLOW | 序列数量超过阈值 |
+| 503 | TIMESERIES_ALREADY_EXIST | 时间序列已存在 |
+| 504 | TIMESERIES_IN_BLACK_LIST | 时间序列正在删除 |
+| 505 | ALIAS_ALREADY_EXIST | 路径别名已经存在 |
+| 506 | PATH_ALREADY_EXIST | 路径已经存在 |
+| 507 | METADATA_ERROR | 处理元数据错误 |
+| 508 | PATH_NOT_EXIST | 路径不存在 |
+| 509 | ILLEGAL_PATH | 路径不合法 |
+| 510 | CREATE_TEMPLATE_ERROR | 创建物理量模板失败 |
+| 511 | DUPLICATED_TEMPLATE | 元数据模板重复 |
+| 512 | UNDEFINED_TEMPLATE | 元数据模板未定义 |
+| 513 | TEMPLATE_NOT_SET | 元数据模板未设置 |
+| 514 | DIFFERENT_TEMPLATE | 元数据模板不一致 |
+| 515 | TEMPLATE_IS_IN_USE | 元数据模板正在使用 |
+| 516 | TEMPLATE_INCOMPATIBLE | 元数据模板不兼容 |
+| 517 | SEGMENT_NOT_FOUND | 未找到 Segment |
+| 518 | PAGE_OUT_OF_SPACE | SchemaFile 中 Page 空间不够 |
+| 519 | RECORD_DUPLICATED | 记录重复 |
+| 520 | SEGMENT_OUT_OF_SPACE | SchemaFile 中 segment 空间不够 |
+| 521 | SCHEMA_FILE_NOT_EXISTS | SchemaFile 不存在 |
+| 522 | OVERSIZE_RECORD | 记录大小超过元数据文件页面大小 |
+| 523 | SCHEMA_FILE_REDO_LOG_BROKEN | SchemaFile 的 redo 日志损坏 |
+| 524 | TEMPLATE_NOT_ACTIVATED | 元数据模板未激活 |
+| 600 | SYSTEM_READ_ONLY | IoTDB 系统只读 |
+| 601 | STORAGE_ENGINE_ERROR | 存储引擎相关错误 |
+| 602 | STORAGE_ENGINE_NOT_READY | 存储引擎还在恢复中,还不能接受读写操作 |
+| 603 | DATAREGION_PROCESS_ERROR | DataRegion 相关错误 |
+| 604 | TSFILE_PROCESSOR_ERROR | TsFile 处理器相关错误 |
+| 605 | WRITE_PROCESS_ERROR | 写入相关错误 |
+| 606 | WRITE_PROCESS_REJECT | 写入拒绝错误 |
+| 607 | OUT_OF_TTL | 插入时间少于 TTL 时间边界 |
+| 608 | COMPACTION_ERROR | 合并错误 |
+| 609 | ALIGNED_TIMESERIES_ERROR | 对齐时间序列错误 |
+| 610 | WAL_ERROR | WAL 异常 |
+| 611 | DISK_SPACE_INSUFFICIENT | 磁盘空间不足 |
+| 700 | SQL_PARSE_ERROR | SQL 语句分析错误 |
+| 701 | SEMANTIC_ERROR | SQL 语义错误 |
+| 702 | GENERATE_TIME_ZONE_ERROR | 生成时区错误 |
+| 703 | SET_TIME_ZONE_ERROR | 设置时区错误 |
+| 704 | QUERY_NOT_ALLOWED | 查询语句不允许 |
+| 705 | LOGICAL_OPERATOR_ERROR | 逻辑符相关错误 |
+| 706 | LOGICAL_OPTIMIZE_ERROR | 逻辑优化相关错误 |
+| 707 | UNSUPPORTED_FILL_TYPE | 不支持的填充类型 |
+| 708 | QUERY_PROCESS_ERROR | 查询处理相关错误 |
+| 709 | MPP_MEMORY_NOT_ENOUGH | MPP 框架中任务执行内存不足 |
+| 710 | CLOSE_OPERATION_ERROR | 关闭操作错误 |
+| 711 | TSBLOCK_SERIALIZE_ERROR | TsBlock 序列化错误 |
+| 712 | INTERNAL_REQUEST_TIME_OUT | MPP 操作超时 |
+| 713 | INTERNAL_REQUEST_RETRY_ERROR | 内部操作重试失败 |
+| 714 | NO_SUCH_QUERY | 查询不存在 |
+| 715 | QUERY_WAS_KILLED | 查询执行时被终止 |
+| 800 | UNINITIALIZED_AUTH_ERROR | 授权模块未初始化 |
+| 801 | WRONG_LOGIN_PASSWORD | 用户名或密码错误 |
+| 802 | NOT_LOGIN | 没有登录 |
+| 803 | NO_PERMISSION | 没有操作权限 |
+| 804 | USER_NOT_EXIST | 用户不存在 |
+| 805 | USER_ALREADY_EXIST | 用户已存在 |
+| 806 | USER_ALREADY_HAS_ROLE | 用户拥有对应角色 |
+| 807 | USER_NOT_HAS_ROLE | 用户未拥有对应角色 |
+| 900 | MIGRATE_REGION_ERROR | Region 迁移失败 |
+| 901 | CREATE_REGION_ERROR | 创建 region 失败 |
+| 902 | DELETE_REGION_ERROR | 删除 region 失败 |
+| 903 | PARTITION_CACHE_UPDATE_ERROR | 更新分区缓存失败 |
+| 904 | CONSENSUS_NOT_INITIALIZED | 共识层未初始化,不能提供服务 |
+| 905 | REGION_LEADER_CHANGE_ERROR | Region leader 迁移失败 |
+| 906 | NO_AVAILABLE_REGION_GROUP | 无法找到可用的 Region 副本组 |
+| 1000 | DATANODE_ALREADY_REGISTERED | DataNode 在集群中已经注册 |
+| 1001 | NO_ENOUGH_DATANODE | DataNode 数量不足,无法移除节点或创建副本 |
+| 1002 | ADD_CONFIGNODE_ERROR | 新增 ConfigNode 失败 |
+| 1003 | REMOVE_CONFIGNODE_ERROR | 移除 ConfigNode 失败 |
+| 1004 | DATANODE_NOT_EXIST | 此 DataNode 不存在 |
+| 1005 | DATANODE_STOP_ERROR | DataNode 关闭失败 |
+| 1006 | REMOVE_DATANODE_ERROR | 移除 datanode 失败 |
+| 1007 | REGISTER_DATANODE_WITH_WRONG_ID | 注册的 DataNode 中有错误的注册id |
+| 1008 | CAN_NOT_CONNECT_DATANODE | 连接 DataNode 失败 |
+| 1100 | LOAD_FILE_ERROR | 加载文件错误 |
+| 1101 | LOAD_PIECE_OF_TSFILE_ERROR | 加载 TsFile 片段异常 |
+| 1102 | DESERIALIZE_PIECE_OF_TSFILE_ERROR | 反序列化 TsFile 片段异常 |
+| 1103 | SYNC_CONNECTION_ERROR | 同步连接错误 |
+| 1104 | SYNC_FILE_REDIRECTION_ERROR | 同步文件时重定向异常 |
+| 1105 | SYNC_FILE_ERROR | 同步文件异常 |
+| 1106 | CREATE_PIPE_SINK_ERROR | 创建 PIPE Sink 失败 |
+| 1107 | PIPE_ERROR | PIPE 异常 |
+| 1108 | PIPESERVER_ERROR | PIPE server 异常 |
+| 1109 | VERIFY_METADATA_ERROR | 校验元数据失败 |
+| 1200 | UDF_LOAD_CLASS_ERROR | UDF 加载类异常 |
+| 1201 | UDF_DOWNLOAD_ERROR | 无法从 ConfigNode 下载 UDF |
+| 1202 | CREATE_UDF_ON_DATANODE_ERROR | 在 DataNode 创建 UDF 失败 |
+| 1203 | DROP_UDF_ON_DATANODE_ERROR | 在 DataNode 卸载 UDF 失败 |
+| 1300 | CREATE_TRIGGER_ERROR | ConfigNode 创建 Trigger 失败 |
+| 1301 | DROP_TRIGGER_ERROR | ConfigNode 删除 Trigger 失败 |
+| 1302 | TRIGGER_FIRE_ERROR | 触发器执行错误 |
+| 1303 | TRIGGER_LOAD_CLASS_ERROR | 触发器加载类异常 |
+| 1304 | TRIGGER_DOWNLOAD_ERROR | 从 ConfigNode 下载触发器异常 |
+| 1305 | CREATE_TRIGGER_INSTANCE_ERROR | 创建触发器实例异常 |
+| 1306 | ACTIVE_TRIGGER_INSTANCE_ERROR | 激活触发器实例异常 |
+| 1307 | DROP_TRIGGER_INSTANCE_ERROR | 删除触发器实例异常 |
+| 1308 | UPDATE_TRIGGER_LOCATION_ERROR | 更新有状态的触发器所在 DataNode 异常 |
+| 1400 | NO_SUCH_CQ | CQ 任务不存在 |
+| 1401 | CQ_ALREADY_ACTIVE | CQ 任务已激活 |
+| 1402 | CQ_AlREADY_EXIST | CQ 任务已存在 |
+| 1403 | CQ_UPDATE_LAST_EXEC_TIME_ERROR | CQ 更新上一次执行时间失败 |
> 在最新版本中,我们重构了 IoTDB 的异常类。通过将错误信息统一提取到异常类中,并为所有异常添加不同的错误代码,从而当捕获到异常并引发更高级别的异常时,错误代码将保留并传递,以便用户了解详细的错误原因。
除此之外,我们添加了一个基础异常类“ProcessException”,由所有异常扩展。
diff --git a/mlnode/LICENSE b/mlnode/LICENSE
new file mode 100644
index 0000000000..8aef993efa
--- /dev/null
+++ b/mlnode/LICENSE
@@ -0,0 +1,289 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+--------------------------------------------------------------------------------
+ APACHE IOTDB SUBCOMPONENTS
+--------------------------------------------------------------------------------
+
+The following files are copied from maven-wrapper (https://github.com/takari/maven-wrapper),
+which is under Apache License 2.0:
+
+mvnw files from https://github.com/apache/maven-wrapper Apache 2.0
+
+--------------------------------------------------------------------------------
+
+The following class is modified from Apache commons-collections
+
+./tsfile/src/main/java/org/apache/iotdb/tsfile/utils/Murmur128Hash.java
+Relevant pr is: https://github.com/apache/commons-collections/pull/83/
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Michael Burman's gorilla-tsc project.
+
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/GorillaEncoderV2.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/IntGorillaEncoder.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/LongGorillaEncoder.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/SinglePrecisionEncoderV2.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/encoder/DoublePrecisionEncoderV2.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/GorillaDecoderV2.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/IntGorillaDecoder.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/LongGorillaDecoder.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/SinglePrecisionDecoderV2.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DoublePrecisionDecoderV2.java
+
+Copyright: 2016-2018 Michael Burman and/or other contributors
+Project page: https://github.com/burmanm/gorilla-tsc
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Apache HBase project.
+
+./confignode/src/main/java/org/apache/iotdb/procedure/Procedure.java
+./confignode/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java
+./confignode/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java
+./confignode/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java
+./confignode/src/main/java/org/apache/iotdb/procedure/StoppableThread.java
+
+Copyright: The Apache Software Foundation
+Project page: https://hbase.apache.org
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Eclipse Collections project.
+
+./tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ByteArrayList.java
+
+Copyright: 2021 Goldman Sachs
+Project page: https://www.eclipse.org/collections
+License: https://github.com/eclipse/eclipse-collections/blob/master/LICENSE-EDL-1.0.txt
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Micrometer project.
+
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmClassLoaderMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmCompileMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmGcMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmMemoryMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmThreadMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/logback/LogbackMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/utils/JvmUtils
+
+Copyright: 2017 VMware
+Project page: https://github.com/micrometer-metrics/micrometer
+License: https://github.com/micrometer-metrics/micrometer/blob/main/LICENSE
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Trino project (https://github.com/trinodb/trino),
+which is under Apache License 2.0:
+
+./server/src/main/java/org/apache/iotdb/db/mpp/execution/QueryState.java
+./server/src/main/java/org/apache/iotdb/db/mpp/execution/StateMachine.java
+./server/src/main/java/org/apache/iotdb/db/mpp/execution/driver/Driver.java
+./server/src/main/java/org/apache/iotdb/db/mpp/execution/fragment/FragmentInstanceState.java
+./server/src/main/java/org/apache/iotdb/db/mpp/execution/fragment/FragmentInstanceStateMachine.java
+./server/src/main/java/org/apache/iotdb/db/mpp/plan/planner/LocalExecutionPlanner.java
+./tsfile/src/main/java/org/apache/iotdb/tsfile/read/common/block/*
diff --git a/mlnode/iotdb/thrift/__init__.py b/mlnode/iotdb/thrift/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/mlnode/iotdb/thrift/common/__init__.py b/mlnode/iotdb/thrift/common/__init__.py
new file mode 100644
index 0000000000..adefd8e51f
--- /dev/null
+++ b/mlnode/iotdb/thrift/common/__init__.py
@@ -0,0 +1 @@
+__all__ = ['ttypes', 'constants']
diff --git a/mlnode/iotdb/thrift/common/constants.py b/mlnode/iotdb/thrift/common/constants.py
new file mode 100644
index 0000000000..69c181ade3
--- /dev/null
+++ b/mlnode/iotdb/thrift/common/constants.py
@@ -0,0 +1,14 @@
+#
+# Autogenerated by Thrift Compiler (0.14.1)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+# options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
+from thrift.protocol.TProtocol import TProtocolException
+from thrift.TRecursive import fix_spec
+
+import sys
+from .ttypes import *
diff --git a/mlnode/iotdb/thrift/common/ttypes.py b/mlnode/iotdb/thrift/common/ttypes.py
new file mode 100644
index 0000000000..0b98dd8343
--- /dev/null
+++ b/mlnode/iotdb/thrift/common/ttypes.py
@@ -0,0 +1,1345 @@
+#
+# Autogenerated by Thrift Compiler (0.14.1)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+# options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
+from thrift.protocol.TProtocol import TProtocolException
+from thrift.TRecursive import fix_spec
+
+import sys
+
+from thrift.transport import TTransport
+all_structs = []
+
+
+class TConsensusGroupType(object):
+ ConfigNodeRegion = 0
+ DataRegion = 1
+ SchemaRegion = 2
+
+ _VALUES_TO_NAMES = {
+ 0: "ConfigNodeRegion",
+ 1: "DataRegion",
+ 2: "SchemaRegion",
+ }
+
+ _NAMES_TO_VALUES = {
+ "ConfigNodeRegion": 0,
+ "DataRegion": 1,
+ "SchemaRegion": 2,
+ }
+
+
+class TRegionMigrateFailedType(object):
+ AddPeerFailed = 0
+ RemovePeerFailed = 1
+ RemoveConsensusGroupFailed = 2
+ DeleteRegionFailed = 3
+ CreateRegionFailed = 4
+
+ _VALUES_TO_NAMES = {
+ 0: "AddPeerFailed",
+ 1: "RemovePeerFailed",
+ 2: "RemoveConsensusGroupFailed",
+ 3: "DeleteRegionFailed",
+ 4: "CreateRegionFailed",
+ }
+
+ _NAMES_TO_VALUES = {
+ "AddPeerFailed": 0,
+ "RemovePeerFailed": 1,
+ "RemoveConsensusGroupFailed": 2,
+ "DeleteRegionFailed": 3,
+ "CreateRegionFailed": 4,
+ }
+
+
+class TEndPoint(object):
+ """
+ Attributes:
+ - ip
+ - port
+
+ """
+
+
+ def __init__(self, ip=None, port=None,):
+ self.ip = ip
+ self.port = port
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.ip = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.port = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TEndPoint')
+ if self.ip is not None:
+ oprot.writeFieldBegin('ip', TType.STRING, 1)
+ oprot.writeString(self.ip.encode('utf-8') if sys.version_info[0] == 2 else self.ip)
+ oprot.writeFieldEnd()
+ if self.port is not None:
+ oprot.writeFieldBegin('port', TType.I32, 2)
+ oprot.writeI32(self.port)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.ip is None:
+ raise TProtocolException(message='Required field ip is unset!')
+ if self.port is None:
+ raise TProtocolException(message='Required field port is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TSStatus(object):
+ """
+ Attributes:
+ - code
+ - message
+ - subStatus
+ - redirectNode
+
+ """
+
+
+ def __init__(self, code=None, message=None, subStatus=None, redirectNode=None,):
+ self.code = code
+ self.message = message
+ self.subStatus = subStatus
+ self.redirectNode = redirectNode
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.code = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.LIST:
+ self.subStatus = []
+ (_etype3, _size0) = iprot.readListBegin()
+ for _i4 in range(_size0):
+ _elem5 = TSStatus()
+ _elem5.read(iprot)
+ self.subStatus.append(_elem5)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.redirectNode = TEndPoint()
+ self.redirectNode.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TSStatus')
+ if self.code is not None:
+ oprot.writeFieldBegin('code', TType.I32, 1)
+ oprot.writeI32(self.code)
+ oprot.writeFieldEnd()
+ if self.message is not None:
+ oprot.writeFieldBegin('message', TType.STRING, 2)
+ oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
+ oprot.writeFieldEnd()
+ if self.subStatus is not None:
+ oprot.writeFieldBegin('subStatus', TType.LIST, 3)
+ oprot.writeListBegin(TType.STRUCT, len(self.subStatus))
+ for iter6 in self.subStatus:
+ iter6.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.redirectNode is not None:
+ oprot.writeFieldBegin('redirectNode', TType.STRUCT, 4)
+ self.redirectNode.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.code is None:
+ raise TProtocolException(message='Required field code is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TConsensusGroupId(object):
+ """
+ Attributes:
+ - type
+ - id
+
+ """
+
+
+ def __init__(self, type=None, id=None,):
+ self.type = type
+ self.id = id
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.type = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.id = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TConsensusGroupId')
+ if self.type is not None:
+ oprot.writeFieldBegin('type', TType.I32, 1)
+ oprot.writeI32(self.type)
+ oprot.writeFieldEnd()
+ if self.id is not None:
+ oprot.writeFieldBegin('id', TType.I32, 2)
+ oprot.writeI32(self.id)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.type is None:
+ raise TProtocolException(message='Required field type is unset!')
+ if self.id is None:
+ raise TProtocolException(message='Required field id is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TSeriesPartitionSlot(object):
+ """
+ Attributes:
+ - slotId
+
+ """
+
+
+ def __init__(self, slotId=None,):
+ self.slotId = slotId
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.slotId = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TSeriesPartitionSlot')
+ if self.slotId is not None:
+ oprot.writeFieldBegin('slotId', TType.I32, 1)
+ oprot.writeI32(self.slotId)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.slotId is None:
+ raise TProtocolException(message='Required field slotId is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TTimePartitionSlot(object):
+ """
+ Attributes:
+ - startTime
+
+ """
+
+
+ def __init__(self, startTime=None,):
+ self.startTime = startTime
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I64:
+ self.startTime = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TTimePartitionSlot')
+ if self.startTime is not None:
+ oprot.writeFieldBegin('startTime', TType.I64, 1)
+ oprot.writeI64(self.startTime)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.startTime is None:
+ raise TProtocolException(message='Required field startTime is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TRegionReplicaSet(object):
+ """
+ Attributes:
+ - regionId
+ - dataNodeLocations
+
+ """
+
+
+ def __init__(self, regionId=None, dataNodeLocations=None,):
+ self.regionId = regionId
+ self.dataNodeLocations = dataNodeLocations
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.regionId = TConsensusGroupId()
+ self.regionId.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.LIST:
+ self.dataNodeLocations = []
+ (_etype10, _size7) = iprot.readListBegin()
+ for _i11 in range(_size7):
+ _elem12 = TDataNodeLocation()
+ _elem12.read(iprot)
+ self.dataNodeLocations.append(_elem12)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TRegionReplicaSet')
+ if self.regionId is not None:
+ oprot.writeFieldBegin('regionId', TType.STRUCT, 1)
+ self.regionId.write(oprot)
+ oprot.writeFieldEnd()
+ if self.dataNodeLocations is not None:
+ oprot.writeFieldBegin('dataNodeLocations', TType.LIST, 2)
+ oprot.writeListBegin(TType.STRUCT, len(self.dataNodeLocations))
+ for iter13 in self.dataNodeLocations:
+ iter13.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.regionId is None:
+ raise TProtocolException(message='Required field regionId is unset!')
+ if self.dataNodeLocations is None:
+ raise TProtocolException(message='Required field dataNodeLocations is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TNodeResource(object):
+ """
+ Attributes:
+ - cpuCoreNum
+ - maxMemory
+
+ """
+
+
+ def __init__(self, cpuCoreNum=None, maxMemory=None,):
+ self.cpuCoreNum = cpuCoreNum
+ self.maxMemory = maxMemory
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.cpuCoreNum = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I64:
+ self.maxMemory = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TNodeResource')
+ if self.cpuCoreNum is not None:
+ oprot.writeFieldBegin('cpuCoreNum', TType.I32, 1)
+ oprot.writeI32(self.cpuCoreNum)
+ oprot.writeFieldEnd()
+ if self.maxMemory is not None:
+ oprot.writeFieldBegin('maxMemory', TType.I64, 2)
+ oprot.writeI64(self.maxMemory)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.cpuCoreNum is None:
+ raise TProtocolException(message='Required field cpuCoreNum is unset!')
+ if self.maxMemory is None:
+ raise TProtocolException(message='Required field maxMemory is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TConfigNodeLocation(object):
+ """
+ Attributes:
+ - configNodeId
+ - internalEndPoint
+ - consensusEndPoint
+
+ """
+
+
+ def __init__(self, configNodeId=None, internalEndPoint=None, consensusEndPoint=None,):
+ self.configNodeId = configNodeId
+ self.internalEndPoint = internalEndPoint
+ self.consensusEndPoint = consensusEndPoint
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.configNodeId = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.internalEndPoint = TEndPoint()
+ self.internalEndPoint.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.consensusEndPoint = TEndPoint()
+ self.consensusEndPoint.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TConfigNodeLocation')
+ if self.configNodeId is not None:
+ oprot.writeFieldBegin('configNodeId', TType.I32, 1)
+ oprot.writeI32(self.configNodeId)
+ oprot.writeFieldEnd()
+ if self.internalEndPoint is not None:
+ oprot.writeFieldBegin('internalEndPoint', TType.STRUCT, 2)
+ self.internalEndPoint.write(oprot)
+ oprot.writeFieldEnd()
+ if self.consensusEndPoint is not None:
+ oprot.writeFieldBegin('consensusEndPoint', TType.STRUCT, 3)
+ self.consensusEndPoint.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.configNodeId is None:
+ raise TProtocolException(message='Required field configNodeId is unset!')
+ if self.internalEndPoint is None:
+ raise TProtocolException(message='Required field internalEndPoint is unset!')
+ if self.consensusEndPoint is None:
+ raise TProtocolException(message='Required field consensusEndPoint is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TDataNodeLocation(object):
+ """
+ Attributes:
+ - dataNodeId
+ - clientRpcEndPoint
+ - internalEndPoint
+ - mPPDataExchangeEndPoint
+ - dataRegionConsensusEndPoint
+ - schemaRegionConsensusEndPoint
+
+ """
+
+
+ def __init__(self, dataNodeId=None, clientRpcEndPoint=None, internalEndPoint=None, mPPDataExchangeEndPoint=None, dataRegionConsensusEndPoint=None, schemaRegionConsensusEndPoint=None,):
+ self.dataNodeId = dataNodeId
+ self.clientRpcEndPoint = clientRpcEndPoint
+ self.internalEndPoint = internalEndPoint
+ self.mPPDataExchangeEndPoint = mPPDataExchangeEndPoint
+ self.dataRegionConsensusEndPoint = dataRegionConsensusEndPoint
+ self.schemaRegionConsensusEndPoint = schemaRegionConsensusEndPoint
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.dataNodeId = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.clientRpcEndPoint = TEndPoint()
+ self.clientRpcEndPoint.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.internalEndPoint = TEndPoint()
+ self.internalEndPoint.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.mPPDataExchangeEndPoint = TEndPoint()
+ self.mPPDataExchangeEndPoint.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.STRUCT:
+ self.dataRegionConsensusEndPoint = TEndPoint()
+ self.dataRegionConsensusEndPoint.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRUCT:
+ self.schemaRegionConsensusEndPoint = TEndPoint()
+ self.schemaRegionConsensusEndPoint.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TDataNodeLocation')
+ if self.dataNodeId is not None:
+ oprot.writeFieldBegin('dataNodeId', TType.I32, 1)
+ oprot.writeI32(self.dataNodeId)
+ oprot.writeFieldEnd()
+ if self.clientRpcEndPoint is not None:
+ oprot.writeFieldBegin('clientRpcEndPoint', TType.STRUCT, 2)
+ self.clientRpcEndPoint.write(oprot)
+ oprot.writeFieldEnd()
+ if self.internalEndPoint is not None:
+ oprot.writeFieldBegin('internalEndPoint', TType.STRUCT, 3)
+ self.internalEndPoint.write(oprot)
+ oprot.writeFieldEnd()
+ if self.mPPDataExchangeEndPoint is not None:
+ oprot.writeFieldBegin('mPPDataExchangeEndPoint', TType.STRUCT, 4)
+ self.mPPDataExchangeEndPoint.write(oprot)
+ oprot.writeFieldEnd()
+ if self.dataRegionConsensusEndPoint is not None:
+ oprot.writeFieldBegin('dataRegionConsensusEndPoint', TType.STRUCT, 5)
+ self.dataRegionConsensusEndPoint.write(oprot)
+ oprot.writeFieldEnd()
+ if self.schemaRegionConsensusEndPoint is not None:
+ oprot.writeFieldBegin('schemaRegionConsensusEndPoint', TType.STRUCT, 6)
+ self.schemaRegionConsensusEndPoint.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.dataNodeId is None:
+ raise TProtocolException(message='Required field dataNodeId is unset!')
+ if self.clientRpcEndPoint is None:
+ raise TProtocolException(message='Required field clientRpcEndPoint is unset!')
+ if self.internalEndPoint is None:
+ raise TProtocolException(message='Required field internalEndPoint is unset!')
+ if self.mPPDataExchangeEndPoint is None:
+ raise TProtocolException(message='Required field mPPDataExchangeEndPoint is unset!')
+ if self.dataRegionConsensusEndPoint is None:
+ raise TProtocolException(message='Required field dataRegionConsensusEndPoint is unset!')
+ if self.schemaRegionConsensusEndPoint is None:
+ raise TProtocolException(message='Required field schemaRegionConsensusEndPoint is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TDataNodeConfiguration(object):
+ """
+ Attributes:
+ - location
+ - resource
+
+ """
+
+
+ def __init__(self, location=None, resource=None,):
+ self.location = location
+ self.resource = resource
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.location = TDataNodeLocation()
+ self.location.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.resource = TNodeResource()
+ self.resource.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TDataNodeConfiguration')
+ if self.location is not None:
+ oprot.writeFieldBegin('location', TType.STRUCT, 1)
+ self.location.write(oprot)
+ oprot.writeFieldEnd()
+ if self.resource is not None:
+ oprot.writeFieldBegin('resource', TType.STRUCT, 2)
+ self.resource.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.location is None:
+ raise TProtocolException(message='Required field location is unset!')
+ if self.resource is None:
+ raise TProtocolException(message='Required field resource is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TFlushReq(object):
+ """
+ Attributes:
+ - isSeq
+ - storageGroups
+
+ """
+
+
+ def __init__(self, isSeq=None, storageGroups=None,):
+ self.isSeq = isSeq
+ self.storageGroups = storageGroups
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.isSeq = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.LIST:
+ self.storageGroups = []
+ (_etype17, _size14) = iprot.readListBegin()
+ for _i18 in range(_size14):
+ _elem19 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ self.storageGroups.append(_elem19)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TFlushReq')
+ if self.isSeq is not None:
+ oprot.writeFieldBegin('isSeq', TType.STRING, 1)
+ oprot.writeString(self.isSeq.encode('utf-8') if sys.version_info[0] == 2 else self.isSeq)
+ oprot.writeFieldEnd()
+ if self.storageGroups is not None:
+ oprot.writeFieldBegin('storageGroups', TType.LIST, 2)
+ oprot.writeListBegin(TType.STRING, len(self.storageGroups))
+ for iter20 in self.storageGroups:
+ oprot.writeString(iter20.encode('utf-8') if sys.version_info[0] == 2 else iter20)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TSchemaNode(object):
+ """
+ Attributes:
+ - nodeName
+ - nodeType
+
+ """
+
+
+ def __init__(self, nodeName=None, nodeType=None,):
+ self.nodeName = nodeName
+ self.nodeType = nodeType
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.nodeName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.BYTE:
+ self.nodeType = iprot.readByte()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TSchemaNode')
+ if self.nodeName is not None:
+ oprot.writeFieldBegin('nodeName', TType.STRING, 1)
+ oprot.writeString(self.nodeName.encode('utf-8') if sys.version_info[0] == 2 else self.nodeName)
+ oprot.writeFieldEnd()
+ if self.nodeType is not None:
+ oprot.writeFieldBegin('nodeType', TType.BYTE, 2)
+ oprot.writeByte(self.nodeType)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.nodeName is None:
+ raise TProtocolException(message='Required field nodeName is unset!')
+ if self.nodeType is None:
+ raise TProtocolException(message='Required field nodeType is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TSetTTLReq(object):
+ """
+ Attributes:
+ - storageGroupPathPattern
+ - TTL
+
+ """
+
+
+ def __init__(self, storageGroupPathPattern=None, TTL=None,):
+ self.storageGroupPathPattern = storageGroupPathPattern
+ self.TTL = TTL
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.LIST:
+ self.storageGroupPathPattern = []
+ (_etype24, _size21) = iprot.readListBegin()
+ for _i25 in range(_size21):
+ _elem26 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ self.storageGroupPathPattern.append(_elem26)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I64:
+ self.TTL = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TSetTTLReq')
+ if self.storageGroupPathPattern is not None:
+ oprot.writeFieldBegin('storageGroupPathPattern', TType.LIST, 1)
+ oprot.writeListBegin(TType.STRING, len(self.storageGroupPathPattern))
+ for iter27 in self.storageGroupPathPattern:
+ oprot.writeString(iter27.encode('utf-8') if sys.version_info[0] == 2 else iter27)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.TTL is not None:
+ oprot.writeFieldBegin('TTL', TType.I64, 2)
+ oprot.writeI64(self.TTL)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.storageGroupPathPattern is None:
+ raise TProtocolException(message='Required field storageGroupPathPattern is unset!')
+ if self.TTL is None:
+ raise TProtocolException(message='Required field TTL is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TFile(object):
+ """
+ Attributes:
+ - fileName
+ - file
+
+ """
+
+
+ def __init__(self, fileName=None, file=None,):
+ self.fileName = fileName
+ self.file = file
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.fileName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.file = iprot.readBinary()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TFile')
+ if self.fileName is not None:
+ oprot.writeFieldBegin('fileName', TType.STRING, 1)
+ oprot.writeString(self.fileName.encode('utf-8') if sys.version_info[0] == 2 else self.fileName)
+ oprot.writeFieldEnd()
+ if self.file is not None:
+ oprot.writeFieldBegin('file', TType.STRING, 2)
+ oprot.writeBinary(self.file)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.fileName is None:
+ raise TProtocolException(message='Required field fileName is unset!')
+ if self.file is None:
+ raise TProtocolException(message='Required field file is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+
+class TFilesResp(object):
+ """
+ Attributes:
+ - status
+ - files
+
+ """
+
+
+ def __init__(self, status=None, files=None,):
+ self.status = status
+ self.files = files
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.status = TSStatus()
+ self.status.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.LIST:
+ self.files = []
+ (_etype31, _size28) = iprot.readListBegin()
+ for _i32 in range(_size28):
+ _elem33 = TFile()
+ _elem33.read(iprot)
+ self.files.append(_elem33)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('TFilesResp')
+ if self.status is not None:
+ oprot.writeFieldBegin('status', TType.STRUCT, 1)
+ self.status.write(oprot)
+ oprot.writeFieldEnd()
+ if self.files is not None:
+ oprot.writeFieldBegin('files', TType.LIST, 2)
+ oprot.writeListBegin(TType.STRUCT, len(self.files))
+ for iter34 in self.files:
+ iter34.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.status is None:
+ raise TProtocolException(message='Required field status is unset!')
+ if self.files is None:
+ raise TProtocolException(message='Required field files is unset!')
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+all_structs.append(TEndPoint)
+TEndPoint.thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'ip', 'UTF8', None, ), # 1
+ (2, TType.I32, 'port', None, None, ), # 2
+)
+all_structs.append(TSStatus)
+TSStatus.thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'code', None, None, ), # 1
+ (2, TType.STRING, 'message', 'UTF8', None, ), # 2
+ (3, TType.LIST, 'subStatus', (TType.STRUCT, [TSStatus, None], False), None, ), # 3
+ (4, TType.STRUCT, 'redirectNode', [TEndPoint, None], None, ), # 4
+)
+all_structs.append(TConsensusGroupId)
+TConsensusGroupId.thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'type', None, None, ), # 1
+ (2, TType.I32, 'id', None, None, ), # 2
+)
+all_structs.append(TSeriesPartitionSlot)
+TSeriesPartitionSlot.thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'slotId', None, None, ), # 1
+)
+all_structs.append(TTimePartitionSlot)
+TTimePartitionSlot.thrift_spec = (
+ None, # 0
+ (1, TType.I64, 'startTime', None, None, ), # 1
+)
+all_structs.append(TRegionReplicaSet)
+TRegionReplicaSet.thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'regionId', [TConsensusGroupId, None], None, ), # 1
+ (2, TType.LIST, 'dataNodeLocations', (TType.STRUCT, [TDataNodeLocation, None], False), None, ), # 2
+)
+all_structs.append(TNodeResource)
+TNodeResource.thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'cpuCoreNum', None, None, ), # 1
+ (2, TType.I64, 'maxMemory', None, None, ), # 2
+)
+all_structs.append(TConfigNodeLocation)
+TConfigNodeLocation.thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'configNodeId', None, None, ), # 1
+ (2, TType.STRUCT, 'internalEndPoint', [TEndPoint, None], None, ), # 2
+ (3, TType.STRUCT, 'consensusEndPoint', [TEndPoint, None], None, ), # 3
+)
+all_structs.append(TDataNodeLocation)
+TDataNodeLocation.thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'dataNodeId', None, None, ), # 1
+ (2, TType.STRUCT, 'clientRpcEndPoint', [TEndPoint, None], None, ), # 2
+ (3, TType.STRUCT, 'internalEndPoint', [TEndPoint, None], None, ), # 3
+ (4, TType.STRUCT, 'mPPDataExchangeEndPoint', [TEndPoint, None], None, ), # 4
+ (5, TType.STRUCT, 'dataRegionConsensusEndPoint', [TEndPoint, None], None, ), # 5
+ (6, TType.STRUCT, 'schemaRegionConsensusEndPoint', [TEndPoint, None], None, ), # 6
+)
+all_structs.append(TDataNodeConfiguration)
+TDataNodeConfiguration.thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'location', [TDataNodeLocation, None], None, ), # 1
+ (2, TType.STRUCT, 'resource', [TNodeResource, None], None, ), # 2
+)
+all_structs.append(TFlushReq)
+TFlushReq.thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'isSeq', 'UTF8', None, ), # 1
+ (2, TType.LIST, 'storageGroups', (TType.STRING, 'UTF8', False), None, ), # 2
+)
+all_structs.append(TSchemaNode)
+TSchemaNode.thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'nodeName', 'UTF8', None, ), # 1
+ (2, TType.BYTE, 'nodeType', None, None, ), # 2
+)
+all_structs.append(TSetTTLReq)
+TSetTTLReq.thrift_spec = (
+ None, # 0
+ (1, TType.LIST, 'storageGroupPathPattern', (TType.STRING, 'UTF8', False), None, ), # 1
+ (2, TType.I64, 'TTL', None, None, ), # 2
+)
+all_structs.append(TFile)
+TFile.thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'fileName', 'UTF8', None, ), # 1
+ (2, TType.STRING, 'file', 'BINARY', None, ), # 2
+)
+all_structs.append(TFilesResp)
+TFilesResp.thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'status', [TSStatus, None], None, ), # 1
+ (2, TType.LIST, 'files', (TType.STRUCT, [TFile, None], False), None, ), # 2
+)
+fix_spec(all_structs)
+del all_structs
diff --git a/mlnode/iotdb/thrift/confignode/IConfigNodeRPCService-remote b/mlnode/iotdb/thrift/confignode/IConfigNodeRPCService-remote
new file mode 100644
index 0000000000..3056966329
--- /dev/null
+++ b/mlnode/iotdb/thrift/confignode/IConfigNodeRPCService-remote
@@ -0,0 +1,733 @@
+#!/usr/bin/env python
+#
+# Autogenerated by Thrift Compiler (0.14.1)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+# options string: py
+#
+
+import sys
+import pprint
+if sys.version_info[0] > 2:
+ from urllib.parse import urlparse
+else:
+ from urlparse import urlparse
+from thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient
+from thrift.protocol.TBinaryProtocol import TBinaryProtocol
+
+from iotdb.thrift.confignode import IConfigNodeRPCService
+from iotdb.thrift.confignode.ttypes import *
+
+if len(sys.argv) <= 1 or sys.argv[1] == '--help':
+ print('')
+ print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] [-novalidate] [-ca_certs certs] [-keyfile keyfile] [-certfile certfile] function [arg1 [arg2...]]')
+ print('')
+ print('Functions:')
+ print(' TDataNodeRegisterResp registerDataNode(TDataNodeRegisterReq req)')
+ print(' TDataNodeRestartResp restartDataNode(TDataNodeRestartReq req)')
+ print(' TSystemConfigurationResp getSystemConfiguration()')
+ print(' TDataNodeRemoveResp removeDataNode(TDataNodeRemoveReq req)')
+ print(' TDataNodeRegisterResp updateDataNode(TDataNodeUpdateReq req)')
+ print(' TDataNodeConfigurationResp getDataNodeConfiguration(i32 dataNodeId)')
+ print(' TSStatus reportRegionMigrateResult(TRegionMigrateResultReportReq req)')
+ print(' TSStatus setStorageGroup(TSetStorageGroupReq req)')
+ print(' TSStatus deleteStorageGroup(TDeleteStorageGroupReq req)')
+ print(' TSStatus deleteStorageGroups(TDeleteStorageGroupsReq req)')
+ print(' TSStatus setTTL(TSetTTLReq req)')
+ print(' TSStatus setSchemaReplicationFactor(TSetSchemaReplicationFactorReq req)')
+ print(' TSStatus setDataReplicationFactor(TSetDataReplicationFactorReq req)')
+ print(' TSStatus setTimePartitionInterval(TSetTimePartitionIntervalReq req)')
+ print(' TCountStorageGroupResp countMatchedStorageGroups( storageGroupPathPattern)')
+ print(' TStorageGroupSchemaResp getMatchedStorageGroupSchemas( storageGroupPathPattern)')
+ print(' TSchemaPartitionTableResp getSchemaPartitionTable(TSchemaPartitionReq req)')
+ print(' TSchemaPartitionTableResp getOrCreateSchemaPartitionTable(TSchemaPartitionReq req)')
+ print(' TSchemaNodeManagementResp getSchemaNodeManagementPartition(TSchemaNodeManagementReq req)')
+ print(' TDataPartitionTableResp getDataPartitionTable(TDataPartitionReq req)')
+ print(' TDataPartitionTableResp getOrCreateDataPartitionTable(TDataPartitionReq req)')
+ print(' TSStatus operatePermission(TAuthorizerReq req)')
+ print(' TAuthorizerResp queryPermission(TAuthorizerReq req)')
+ print(' TPermissionInfoResp login(TLoginReq req)')
+ print(' TPermissionInfoResp checkUserPrivileges(TCheckUserPrivilegesReq req)')
+ print(' TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req)')
+ print(' TSStatus addConsensusGroup(TAddConsensusGroupReq req)')
+ print(' TSStatus notifyRegisterSuccess()')
+ print(' TSStatus restartConfigNode(TConfigNodeRestartReq req)')
+ print(' TSStatus removeConfigNode(TConfigNodeLocation configNodeLocation)')
+ print(' TSStatus deleteConfigNodePeer(TConfigNodeLocation configNodeLocation)')
+ print(' TSStatus stopConfigNode(TConfigNodeLocation configNodeLocation)')
+ print(' i64 getConfigNodeHeartBeat(i64 timestamp)')
+ print(' TSStatus createFunction(TCreateFunctionReq req)')
+ print(' TSStatus dropFunction(TDropFunctionReq req)')
+ print(' TGetUDFTableResp getUDFTable()')
+ print(' TGetJarInListResp getUDFJar(TGetJarInListReq req)')
+ print(' TSStatus createTrigger(TCreateTriggerReq req)')
+ print(' TSStatus dropTrigger(TDropTriggerReq req)')
+ print(' TGetLocationForTriggerResp getLocationOfStatefulTrigger(string triggerName)')
+ print(' TGetTriggerTableResp getTriggerTable()')
+ print(' TGetTriggerTableResp getStatefulTriggerTable()')
+ print(' TGetJarInListResp getTriggerJar(TGetJarInListReq req)')
+ print(' TSStatus merge()')
+ print(' TSStatus flush(TFlushReq req)')
+ print(' TSStatus clearCache()')
+ print(' TSStatus loadConfiguration()')
+ print(' TSStatus setSystemStatus(string status)')
+ print(' TSStatus setDataNodeStatus(TSetDataNodeStatusReq req)')
+ print(' TSStatus migrateRegion(TMigrateRegionReq req)')
+ print(' TSStatus killQuery(string queryId, i32 dataNodeId)')
+ print(' TGetDataNodeLocationsResp getRunningDataNodeLocations()')
+ print(' TShowClusterResp showCluster()')
+ print(' TShowVariablesResp showVariables()')
+ print(' TShowDataNodesResp showDataNodes()')
+ print(' TShowConfigNodesResp showConfigNodes()')
+ print(' TShowStorageGroupResp showStorageGroup( storageGroupPathPattern)')
+ print(' TShowRegionResp showRegion(TShowRegionReq req)')
+ print(' TRegionRouteMapResp getLatestRegionRouteMap()')
+ print(' TSStatus createSchemaTemplate(TCreateSchemaTemplateReq req)')
+ print(' TGetAllTemplatesResp getAllTemplates()')
+ print(' TGetTemplateResp getTemplate(string req)')
+ print(' TSStatus setSchemaTemplate(TSetSchemaTemplateReq req)')
+ print(' TGetPathsSetTemplatesResp getPathsSetTemplate(string req)')
+ print(' TSStatus deactivateSchemaTemplate(TDeactivateSchemaTemplateReq req)')
+ print(' TSStatus unsetSchemaTemplate(TUnsetSchemaTemplateReq req)')
+ print(' TSStatus dropSchemaTemplate(string req)')
+ print(' TSStatus deleteTimeSeries(TDeleteTimeSeriesReq req)')
+ print(' TSStatus createPipeSink(TPipeSinkInfo req)')
+ print(' TSStatus dropPipeSink(TDropPipeSinkReq req)')
+ print(' TGetPipeSinkResp getPipeSink(TGetPipeSinkReq req)')
+ print(' TSStatus createPipe(TCreatePipeReq req)')
+ print(' TSStatus startPipe(string pipeName)')
+ print(' TSStatus stopPipe(string pipeName)')
+ print(' TSStatus dropPipe(string pipeName)')
+ print(' TShowPipeResp showPipe(TShowPipeReq req)')
+ print(' TGetAllPipeInfoResp getAllPipeInfo()')
+ print(' TSStatus recordPipeMessage(TRecordPipeMessageReq req)')
+ print(' TGetRegionIdResp getRegionId(TGetRegionIdReq req)')
+ print(' TGetTimeSlotListResp getTimeSlotList(TGetTimeSlotListReq req)')
+ print(' TGetSeriesSlotListResp getSeriesSlotList(TGetSeriesSlotListReq req)')
+ print(' TSStatus createCQ(TCreateCQReq req)')
+ print(' TSStatus dropCQ(TDropCQReq req)')
+ print(' TShowCQResp showCQ()')
+ print(' TSStatus createModel(TCreateModelReq req)')
+ print(' TSStatus dropModel(TDropModelReq req)')
+ print(' TShowModelResp showModel(TShowModelReq req)')
+ print(' TShowTrailResp showTrail(TShowTrailReq req)')
+ print(' TSStatus updateModelInfo(TUpdateModelInfoReq req)')
+ print('')
+ sys.exit(0)
+
+pp = pprint.PrettyPrinter(indent=2)
+host = 'localhost'
+port = 9090
+uri = ''
+framed = False
+ssl = False
+validate = True
+ca_certs = None
+keyfile = None
+certfile = None
+http = False
+argi = 1
+
+if sys.argv[argi] == '-h':
+ parts = sys.argv[argi + 1].split(':')
+ host = parts[0]
+ if len(parts) > 1:
+ port = int(parts[1])
+ argi += 2
+
+if sys.argv[argi] == '-u':
+ url = urlparse(sys.argv[argi + 1])
+ parts = url[1].split(':')
+ host = parts[0]
+ if len(parts) > 1:
+ port = int(parts[1])
+ else:
+ port = 80
+ uri = url[2]
+ if url[4]:
+ uri += '?%s' % url[4]
+ http = True
+ argi += 2
+
+if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
+ framed = True
+ argi += 1
+
+if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
+ ssl = True
+ argi += 1
+
+if sys.argv[argi] == '-novalidate':
+ validate = False
+ argi += 1
+
+if sys.argv[argi] == '-ca_certs':
+ ca_certs = sys.argv[argi+1]
+ argi += 2
+
+if sys.argv[argi] == '-keyfile':
+ keyfile = sys.argv[argi+1]
+ argi += 2
+
+if sys.argv[argi] == '-certfile':
+ certfile = sys.argv[argi+1]
+ argi += 2
+
+cmd = sys.argv[argi]
+args = sys.argv[argi + 1:]
+
+if http:
+ transport = THttpClient.THttpClient(host, port, uri)
+else:
+ if ssl:
+ socket = TSSLSocket.TSSLSocket(host, port, validate=validate, ca_certs=ca_certs, keyfile=keyfile, certfile=certfile)
+ else:
+ socket = TSocket.TSocket(host, port)
+ if framed:
+ transport = TTransport.TFramedTransport(socket)
+ else:
+ transport = TTransport.TBufferedTransport(socket)
+protocol = TBinaryProtocol(transport)
+client = IConfigNodeRPCService.Client(protocol)
+transport.open()
+
+if cmd == 'registerDataNode':
+ if len(args) != 1:
+ print('registerDataNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.registerDataNode(eval(args[0]),))
+
+elif cmd == 'restartDataNode':
+ if len(args) != 1:
+ print('restartDataNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.restartDataNode(eval(args[0]),))
+
+elif cmd == 'getSystemConfiguration':
+ if len(args) != 0:
+ print('getSystemConfiguration requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getSystemConfiguration())
+
+elif cmd == 'removeDataNode':
+ if len(args) != 1:
+ print('removeDataNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.removeDataNode(eval(args[0]),))
+
+elif cmd == 'updateDataNode':
+ if len(args) != 1:
+ print('updateDataNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.updateDataNode(eval(args[0]),))
+
+elif cmd == 'getDataNodeConfiguration':
+ if len(args) != 1:
+ print('getDataNodeConfiguration requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getDataNodeConfiguration(eval(args[0]),))
+
+elif cmd == 'reportRegionMigrateResult':
+ if len(args) != 1:
+ print('reportRegionMigrateResult requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.reportRegionMigrateResult(eval(args[0]),))
+
+elif cmd == 'setStorageGroup':
+ if len(args) != 1:
+ print('setStorageGroup requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setStorageGroup(eval(args[0]),))
+
+elif cmd == 'deleteStorageGroup':
+ if len(args) != 1:
+ print('deleteStorageGroup requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.deleteStorageGroup(eval(args[0]),))
+
+elif cmd == 'deleteStorageGroups':
+ if len(args) != 1:
+ print('deleteStorageGroups requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.deleteStorageGroups(eval(args[0]),))
+
+elif cmd == 'setTTL':
+ if len(args) != 1:
+ print('setTTL requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setTTL(eval(args[0]),))
+
+elif cmd == 'setSchemaReplicationFactor':
+ if len(args) != 1:
+ print('setSchemaReplicationFactor requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setSchemaReplicationFactor(eval(args[0]),))
+
+elif cmd == 'setDataReplicationFactor':
+ if len(args) != 1:
+ print('setDataReplicationFactor requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setDataReplicationFactor(eval(args[0]),))
+
+elif cmd == 'setTimePartitionInterval':
+ if len(args) != 1:
+ print('setTimePartitionInterval requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setTimePartitionInterval(eval(args[0]),))
+
+elif cmd == 'countMatchedStorageGroups':
+ if len(args) != 1:
+ print('countMatchedStorageGroups requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.countMatchedStorageGroups(eval(args[0]),))
+
+elif cmd == 'getMatchedStorageGroupSchemas':
+ if len(args) != 1:
+ print('getMatchedStorageGroupSchemas requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getMatchedStorageGroupSchemas(eval(args[0]),))
+
+elif cmd == 'getSchemaPartitionTable':
+ if len(args) != 1:
+ print('getSchemaPartitionTable requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getSchemaPartitionTable(eval(args[0]),))
+
+elif cmd == 'getOrCreateSchemaPartitionTable':
+ if len(args) != 1:
+ print('getOrCreateSchemaPartitionTable requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getOrCreateSchemaPartitionTable(eval(args[0]),))
+
+elif cmd == 'getSchemaNodeManagementPartition':
+ if len(args) != 1:
+ print('getSchemaNodeManagementPartition requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getSchemaNodeManagementPartition(eval(args[0]),))
+
+elif cmd == 'getDataPartitionTable':
+ if len(args) != 1:
+ print('getDataPartitionTable requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getDataPartitionTable(eval(args[0]),))
+
+elif cmd == 'getOrCreateDataPartitionTable':
+ if len(args) != 1:
+ print('getOrCreateDataPartitionTable requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getOrCreateDataPartitionTable(eval(args[0]),))
+
+elif cmd == 'operatePermission':
+ if len(args) != 1:
+ print('operatePermission requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.operatePermission(eval(args[0]),))
+
+elif cmd == 'queryPermission':
+ if len(args) != 1:
+ print('queryPermission requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.queryPermission(eval(args[0]),))
+
+elif cmd == 'login':
+ if len(args) != 1:
+ print('login requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.login(eval(args[0]),))
+
+elif cmd == 'checkUserPrivileges':
+ if len(args) != 1:
+ print('checkUserPrivileges requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.checkUserPrivileges(eval(args[0]),))
+
+elif cmd == 'registerConfigNode':
+ if len(args) != 1:
+ print('registerConfigNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.registerConfigNode(eval(args[0]),))
+
+elif cmd == 'addConsensusGroup':
+ if len(args) != 1:
+ print('addConsensusGroup requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.addConsensusGroup(eval(args[0]),))
+
+elif cmd == 'notifyRegisterSuccess':
+ if len(args) != 0:
+ print('notifyRegisterSuccess requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.notifyRegisterSuccess())
+
+elif cmd == 'restartConfigNode':
+ if len(args) != 1:
+ print('restartConfigNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.restartConfigNode(eval(args[0]),))
+
+elif cmd == 'removeConfigNode':
+ if len(args) != 1:
+ print('removeConfigNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.removeConfigNode(eval(args[0]),))
+
+elif cmd == 'deleteConfigNodePeer':
+ if len(args) != 1:
+ print('deleteConfigNodePeer requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.deleteConfigNodePeer(eval(args[0]),))
+
+elif cmd == 'stopConfigNode':
+ if len(args) != 1:
+ print('stopConfigNode requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.stopConfigNode(eval(args[0]),))
+
+elif cmd == 'getConfigNodeHeartBeat':
+ if len(args) != 1:
+ print('getConfigNodeHeartBeat requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getConfigNodeHeartBeat(eval(args[0]),))
+
+elif cmd == 'createFunction':
+ if len(args) != 1:
+ print('createFunction requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.createFunction(eval(args[0]),))
+
+elif cmd == 'dropFunction':
+ if len(args) != 1:
+ print('dropFunction requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.dropFunction(eval(args[0]),))
+
+elif cmd == 'getUDFTable':
+ if len(args) != 0:
+ print('getUDFTable requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getUDFTable())
+
+elif cmd == 'getUDFJar':
+ if len(args) != 1:
+ print('getUDFJar requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getUDFJar(eval(args[0]),))
+
+elif cmd == 'createTrigger':
+ if len(args) != 1:
+ print('createTrigger requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.createTrigger(eval(args[0]),))
+
+elif cmd == 'dropTrigger':
+ if len(args) != 1:
+ print('dropTrigger requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.dropTrigger(eval(args[0]),))
+
+elif cmd == 'getLocationOfStatefulTrigger':
+ if len(args) != 1:
+ print('getLocationOfStatefulTrigger requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getLocationOfStatefulTrigger(args[0],))
+
+elif cmd == 'getTriggerTable':
+ if len(args) != 0:
+ print('getTriggerTable requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getTriggerTable())
+
+elif cmd == 'getStatefulTriggerTable':
+ if len(args) != 0:
+ print('getStatefulTriggerTable requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getStatefulTriggerTable())
+
+elif cmd == 'getTriggerJar':
+ if len(args) != 1:
+ print('getTriggerJar requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getTriggerJar(eval(args[0]),))
+
+elif cmd == 'merge':
+ if len(args) != 0:
+ print('merge requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.merge())
+
+elif cmd == 'flush':
+ if len(args) != 1:
+ print('flush requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.flush(eval(args[0]),))
+
+elif cmd == 'clearCache':
+ if len(args) != 0:
+ print('clearCache requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.clearCache())
+
+elif cmd == 'loadConfiguration':
+ if len(args) != 0:
+ print('loadConfiguration requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.loadConfiguration())
+
+elif cmd == 'setSystemStatus':
+ if len(args) != 1:
+ print('setSystemStatus requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setSystemStatus(args[0],))
+
+elif cmd == 'setDataNodeStatus':
+ if len(args) != 1:
+ print('setDataNodeStatus requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setDataNodeStatus(eval(args[0]),))
+
+elif cmd == 'migrateRegion':
+ if len(args) != 1:
+ print('migrateRegion requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.migrateRegion(eval(args[0]),))
+
+elif cmd == 'killQuery':
+ if len(args) != 2:
+ print('killQuery requires 2 args')
+ sys.exit(1)
+ pp.pprint(client.killQuery(args[0], eval(args[1]),))
+
+elif cmd == 'getRunningDataNodeLocations':
+ if len(args) != 0:
+ print('getRunningDataNodeLocations requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getRunningDataNodeLocations())
+
+elif cmd == 'showCluster':
+ if len(args) != 0:
+ print('showCluster requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.showCluster())
+
+elif cmd == 'showVariables':
+ if len(args) != 0:
+ print('showVariables requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.showVariables())
+
+elif cmd == 'showDataNodes':
+ if len(args) != 0:
+ print('showDataNodes requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.showDataNodes())
+
+elif cmd == 'showConfigNodes':
+ if len(args) != 0:
+ print('showConfigNodes requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.showConfigNodes())
+
+elif cmd == 'showStorageGroup':
+ if len(args) != 1:
+ print('showStorageGroup requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.showStorageGroup(eval(args[0]),))
+
+elif cmd == 'showRegion':
+ if len(args) != 1:
+ print('showRegion requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.showRegion(eval(args[0]),))
+
+elif cmd == 'getLatestRegionRouteMap':
+ if len(args) != 0:
+ print('getLatestRegionRouteMap requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getLatestRegionRouteMap())
+
+elif cmd == 'createSchemaTemplate':
+ if len(args) != 1:
+ print('createSchemaTemplate requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.createSchemaTemplate(eval(args[0]),))
+
+elif cmd == 'getAllTemplates':
+ if len(args) != 0:
+ print('getAllTemplates requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getAllTemplates())
+
+elif cmd == 'getTemplate':
+ if len(args) != 1:
+ print('getTemplate requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getTemplate(args[0],))
+
+elif cmd == 'setSchemaTemplate':
+ if len(args) != 1:
+ print('setSchemaTemplate requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.setSchemaTemplate(eval(args[0]),))
+
+elif cmd == 'getPathsSetTemplate':
+ if len(args) != 1:
+ print('getPathsSetTemplate requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getPathsSetTemplate(args[0],))
+
+elif cmd == 'deactivateSchemaTemplate':
+ if len(args) != 1:
+ print('deactivateSchemaTemplate requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.deactivateSchemaTemplate(eval(args[0]),))
+
+elif cmd == 'unsetSchemaTemplate':
+ if len(args) != 1:
+ print('unsetSchemaTemplate requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.unsetSchemaTemplate(eval(args[0]),))
+
+elif cmd == 'dropSchemaTemplate':
+ if len(args) != 1:
+ print('dropSchemaTemplate requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.dropSchemaTemplate(args[0],))
+
+elif cmd == 'deleteTimeSeries':
+ if len(args) != 1:
+ print('deleteTimeSeries requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.deleteTimeSeries(eval(args[0]),))
+
+elif cmd == 'createPipeSink':
+ if len(args) != 1:
+ print('createPipeSink requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.createPipeSink(eval(args[0]),))
+
+elif cmd == 'dropPipeSink':
+ if len(args) != 1:
+ print('dropPipeSink requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.dropPipeSink(eval(args[0]),))
+
+elif cmd == 'getPipeSink':
+ if len(args) != 1:
+ print('getPipeSink requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getPipeSink(eval(args[0]),))
+
+elif cmd == 'createPipe':
+ if len(args) != 1:
+ print('createPipe requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.createPipe(eval(args[0]),))
+
+elif cmd == 'startPipe':
+ if len(args) != 1:
+ print('startPipe requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.startPipe(args[0],))
+
+elif cmd == 'stopPipe':
+ if len(args) != 1:
+ print('stopPipe requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.stopPipe(args[0],))
+
+elif cmd == 'dropPipe':
+ if len(args) != 1:
+ print('dropPipe requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.dropPipe(args[0],))
+
+elif cmd == 'showPipe':
+ if len(args) != 1:
+ print('showPipe requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.showPipe(eval(args[0]),))
+
+elif cmd == 'getAllPipeInfo':
+ if len(args) != 0:
+ print('getAllPipeInfo requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.getAllPipeInfo())
+
+elif cmd == 'recordPipeMessage':
+ if len(args) != 1:
+ print('recordPipeMessage requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.recordPipeMessage(eval(args[0]),))
+
+elif cmd == 'getRegionId':
+ if len(args) != 1:
+ print('getRegionId requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getRegionId(eval(args[0]),))
+
+elif cmd == 'getTimeSlotList':
+ if len(args) != 1:
+ print('getTimeSlotList requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getTimeSlotList(eval(args[0]),))
+
+elif cmd == 'getSeriesSlotList':
+ if len(args) != 1:
+ print('getSeriesSlotList requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.getSeriesSlotList(eval(args[0]),))
+
+elif cmd == 'createCQ':
+ if len(args) != 1:
+ print('createCQ requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.createCQ(eval(args[0]),))
+
+elif cmd == 'dropCQ':
+ if len(args) != 1:
+ print('dropCQ requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.dropCQ(eval(args[0]),))
+
+elif cmd == 'showCQ':
+ if len(args) != 0:
+ print('showCQ requires 0 args')
+ sys.exit(1)
+ pp.pprint(client.showCQ())
+
+elif cmd == 'createModel':
+ if len(args) != 1:
+ print('createModel requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.createModel(eval(args[0]),))
+
+elif cmd == 'dropModel':
+ if len(args) != 1:
+ print('dropModel requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.dropModel(eval(args[0]),))
+
+elif cmd == 'showModel':
+ if len(args) != 1:
+ print('showModel requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.showModel(eval(args[0]),))
+
+elif cmd == 'showTrail':
+ if len(args) != 1:
+ print('showTrail requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.showTrail(eval(args[0]),))
+
+elif cmd == 'updateModelInfo':
+ if len(args) != 1:
+ print('updateModelInfo requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.updateModelInfo(eval(args[0]),))
+
+else:
+ print('Unrecognized method %s' % cmd)
+ sys.exit(1)
+
+transport.close()
diff --git a/mlnode/iotdb/thrift/confignode/IConfigNodeRPCService.py b/mlnode/iotdb/thrift/confignode/IConfigNodeRPCService.py
new file mode 100644
index 0000000000..31ef37db32
--- /dev/null
+++ b/mlnode/iotdb/thrift/confignode/IConfigNodeRPCService.py
@@ -0,0 +1,16798 @@
+#
+# Autogenerated by Thrift Compiler (0.14.1)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+# options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
+from thrift.protocol.TProtocol import TProtocolException
+from thrift.TRecursive import fix_spec
+
+import sys
+import logging
+from .ttypes import *
+from thrift.Thrift import TProcessor
+from thrift.transport import TTransport
+all_structs = []
+
+
+class Iface(object):
+ def registerDataNode(self, req):
+ """
+ Register a new DataNode into the cluster
+
+ @return SUCCESS_STATUS if the new DataNode registered successfully
+ REJECT_NODE_START if the configuration chek of the DataNode to be registered fails,
+ and a detailed error message will be returned.
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def restartDataNode(self, req):
+ """
+ Restart an existed DataNode
+
+ @return SUCCESS_STATUS if DataNode restart request is accepted
+ REJECT_NODE_START if the configuration chek of the DataNode to be restarted fails,
+ and a detailed error message will be returned.
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getSystemConfiguration(self):
+ """
+ Get system configurations. i.e. configurations that is not associated with the DataNodeId
+
+ """
+ pass
+
+ def removeDataNode(self, req):
+ """
+ Generate a set of DataNodeRemoveProcedure to remove some specific DataNodes from the cluster
+
+ @return SUCCESS_STATUS if the DataNodeRemoveProcedure submitted successfully
+ LACK_REPLICATION if the number of DataNodes will be too small to maintain
+ RegionReplicas after remove these DataNodes
+ DATANODE_NOT_EXIST if one of the DataNodes in the TDataNodeRemoveReq doesn't exist in the cluster
+ NODE_DELETE_FAILED_ERROR if failed to submit the DataNodeRemoveProcedure
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def updateDataNode(self, req):
+ """
+ Update the specified DataNode‘s location in the cluster when restart
+
+ @return SUCCESS_STATUS if the DataNode updated successfully
+ DATANODE_NOT_EXIST if one of the DataNodes in the TDataNodeUpdateReq doesn't exist in the cluster
+ UPDATE_DATANODE_FAILED if failed to update the DataNode
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getDataNodeConfiguration(self, dataNodeId):
+ """
+ Get one or more DataNodes' configuration
+
+ @param dataNodeId, the specific DataNode's index
+ @return The specific DataNode's configuration if the DataNode exists,
+ or all DataNodes' configuration if dataNodeId is -1
+
+ Parameters:
+ - dataNodeId
+
+ """
+ pass
+
+ def reportRegionMigrateResult(self, req):
+ """
+ Report region migration complete
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def setStorageGroup(self, req):
+ """
+ Set a new StorageGroup, all fields in TStorageGroupSchema can be customized
+ while the undefined fields will automatically use default values
+
+ @return SUCCESS_STATUS if the new StorageGroup set successfully
+ PATH_ILLEGAL if the new StorageGroup's name is illegal
+ STORAGE_GROUP_ALREADY_EXISTS if the StorageGroup already exist
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def deleteStorageGroup(self, req):
+ """
+ Generate a DeleteStorageGroupProcedure to delete a specific StorageGroup
+
+ @return SUCCESS_STATUS if the DeleteStorageGroupProcedure submitted successfully
+ TIMESERIES_NOT_EXIST if the specific StorageGroup doesn't exist
+ EXECUTE_STATEMENT_ERROR if failed to submit the DeleteStorageGroupProcedure
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def deleteStorageGroups(self, req):
+ """
+ Generate a set of DeleteStorageGroupProcedure to delete some specific StorageGroups
+
+ @return SUCCESS_STATUS if the DeleteStorageGroupProcedure submitted successfully
+ TIMESERIES_NOT_EXIST if the specific StorageGroup doesn't exist
+ EXECUTE_STATEMENT_ERROR if failed to submit the DeleteStorageGroupProcedure
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def setTTL(self, req):
+ """
+ Update the specific StorageGroup's TTL
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def setSchemaReplicationFactor(self, req):
+ """
+ Update the specific StorageGroup's SchemaReplicationFactor
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def setDataReplicationFactor(self, req):
+ """
+ Update the specific StorageGroup's DataReplicationFactor
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def setTimePartitionInterval(self, req):
+ """
+ Update the specific StorageGroup's PartitionInterval
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def countMatchedStorageGroups(self, storageGroupPathPattern):
+ """
+ Count the matched StorageGroups
+
+ Parameters:
+ - storageGroupPathPattern
+
+ """
+ pass
+
+ def getMatchedStorageGroupSchemas(self, storageGroupPathPattern):
+ """
+ Get the matched StorageGroups' TStorageGroupSchema
+
+ Parameters:
+ - storageGroupPathPattern
+
+ """
+ pass
+
+ def getSchemaPartitionTable(self, req):
+ """
+ Get SchemaPartitionTable by specific PathPatternTree,
+ the returned SchemaPartitionTable will not contain the unallocated SeriesPartitionSlots
+ See https://apache-iotdb.feishu.cn/docs/doccnqe3PLPEKwsCX1xadXQ2JOg for detailed matching rules
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getOrCreateSchemaPartitionTable(self, req):
+ """
+ Get or create SchemaPartitionTable by specific PathPatternTree,
+ the returned SchemaPartitionTable always contains all the SeriesPartitionSlots
+ since the unallocated SeriesPartitionSlots will be allocated by the way
+
+ @return SUCCESS_STATUS if the SchemaPartitionTable got or created successfully
+ NOT_ENOUGH_DATA_NODE if the number of cluster DataNodes is not enough for creating new SchemaRegions
+ STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getSchemaNodeManagementPartition(self, req):
+ """
+ Get the partition info used for schema node query and get the node info in CluterSchemaInfo.
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getDataPartitionTable(self, req):
+ """
+ Get DataPartitionTable by specific PartitionSlotsMap,
+ the returned DataPartitionTable will not contain the unallocated SeriesPartitionSlots and TimePartitionSlots
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getOrCreateDataPartitionTable(self, req):
+ """
+ Get or create DataPartitionTable by specific PartitionSlotsMap,
+ the returned SchemaPartitionTable always contains all the SeriesPartitionSlots and TimePartitionSlots
+ since the unallocated SeriesPartitionSlots and TimePartitionSlots will be allocated by the way
+
+ @return SUCCESS_STATUS if the DataPartitionTable got or created successfully
+ NOT_ENOUGH_DATA_NODE if the number of cluster DataNodes is not enough for creating new DataRegions
+ STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def operatePermission(self, req):
+ """
+ Execute permission write operations such as create user, create role, and grant permission.
+ There is no need to update the cache information of the DataNode for creating users and roles
+
+ @return SUCCESS_STATUS if the permission write operation is executed successfully
+ INVALIDATE_PERMISSION_CACHE_ERROR if the update cache of the permission information in the datanode fails
+ EXECUTE_PERMISSION_EXCEPTION_ERROR if the permission write operation fails, like the user doesn't exist
+ INTERNAL_SERVER_ERROR if the permission type does not exist
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def queryPermission(self, req):
+ """
+ Execute permission read operations such as list user
+
+ @return SUCCESS_STATUS if the permission read operation is executed successfully
+ ROLE_NOT_EXIST_ERROR if the role does not exist
+ USER_NOT_EXIST_ERROR if the user does not exist
+ INTERNAL_SERVER_ERROR if the permission type does not exist
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def login(self, req):
+ """
+ Authenticate user login
+
+ @return SUCCESS_STATUS if the user exists and the correct username and password are entered
+ WRONG_LOGIN_PASSWORD_ERROR if the user enters the wrong username or password
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def checkUserPrivileges(self, req):
+ """
+ Permission checking for user operations
+
+ @return SUCCESS_STATUS if the user has the permission
+ EXECUTE_PERMISSION_EXCEPTION_ERROR if the seriesPath or the privilege is illegal.
+ NO_PERMISSION_ERROR if the user does not have this permission
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def registerConfigNode(self, req):
+ """
+ The Non-Seed-ConfigNode submit a registration request to the ConfigNode-leader when first startup
+
+ @return SUCCESS_STATUS if the AddConfigNodeProcedure submitted successfully.
+ REJECT_NODE_START if the configuration chek of the ConfigNode to be registered fails,
+ and a detailed error message will be returned.
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def addConsensusGroup(self, req):
+ """
+ The ConfigNode-leader will guide the Non-Seed-ConfigNode to join the ConsensusGroup when first startup
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def notifyRegisterSuccess(self):
+ """
+ The ConfigNode-leader will notify the Non-Seed-ConfigNode that the registration success
+
+ """
+ pass
+
+ def restartConfigNode(self, req):
+ """
+ Restart an existed ConfigNode
+
+ @return SUCCESS_STATUS if ConfigNode restart request is accepted
+ REJECT_NODE_START if the configuration check of the ConfigNode to be restarted fails,
+ and a detailed error message will be returned.
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def removeConfigNode(self, configNodeLocation):
+ """
+ Remove the specific ConfigNode from the cluster
+
+ @return SUCCESS_STATUS if the RemoveConfigNodeProcedure submitted successfully
+ REMOVE_CONFIGNODE_FAILED if the number of ConfigNode is less than 1
+ or the specific ConfigNode doesn't exist
+ or the specific ConfigNode is leader
+
+ Parameters:
+ - configNodeLocation
+
+ """
+ pass
+
+ def deleteConfigNodePeer(self, configNodeLocation):
+ """
+ Let the specific ConfigNode delete the peer
+
+ @return SUCCESS_STATUS if delete peer successfully
+ REMOVE_CONFIGNODE_FAILED if the specific ConfigNode doesn't exist in the current cluster
+ or Ratis internal failure
+
+ Parameters:
+ - configNodeLocation
+
+ """
+ pass
+
+ def stopConfigNode(self, configNodeLocation):
+ """
+ Stop the specific ConfigNode
+
+ Parameters:
+ - configNodeLocation
+
+ """
+ pass
+
+ def getConfigNodeHeartBeat(self, timestamp):
+ """
+ The ConfigNode-leader will ping other ConfigNodes periodically
+
+ Parameters:
+ - timestamp
+
+ """
+ pass
+
+ def createFunction(self, req):
+ """
+ Create a function on all online ConfigNodes and DataNodes
+
+ @return SUCCESS_STATUS if the function was created successfully
+ EXECUTE_STATEMENT_ERROR if operations on any node failed
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def dropFunction(self, req):
+ """
+ Remove a function on all online ConfigNodes and DataNodes
+
+ @return SUCCESS_STATUS if the function was removed successfully
+ EXECUTE_STATEMENT_ERROR if operations on any node failed
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getUDFTable(self):
+ """
+ Return the UDF table
+
+ """
+ pass
+
+ def getUDFJar(self, req):
+ """
+ Return the UDF jar list of the jar name list
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def createTrigger(self, req):
+ """
+ Create a stateless trigger on all online DataNodes or Create a stateful trigger on a specific DataNode
+ and sync Information of it to all ConfigNodes
+
+ @return SUCCESS_STATUS if the trigger was created successfully
+ EXECUTE_STATEMENT_ERROR if operations on any node failed
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def dropTrigger(self, req):
+ """
+ Remove a trigger on all online ConfigNodes and DataNodes
+
+ @return SUCCESS_STATUS if the function was removed successfully
+ EXECUTE_STATEMENT_ERROR if operations on any node failed
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getLocationOfStatefulTrigger(self, triggerName):
+ """
+ Get TDataNodeLocation of a stateful trigger
+
+ Parameters:
+ - triggerName
+
+ """
+ pass
+
+ def getTriggerTable(self):
+ """
+ Return the trigger table
+
+ """
+ pass
+
+ def getStatefulTriggerTable(self):
+ """
+ Return the Stateful trigger table
+
+ """
+ pass
+
+ def getTriggerJar(self, req):
+ """
+ Return the trigger jar list of the trigger name list
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def merge(self):
+ """
+ Execute Level Compaction and unsequence Compaction task on all DataNodes
+
+ """
+ pass
+
+ def flush(self, req):
+ """
+ Persist all the data points in the memory table of the database to the disk, and seal the data file on all DataNodes
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def clearCache(self):
+ """
+ Clear the cache of chunk, chunk metadata and timeseries metadata to release the memory footprint on all DataNodes
+
+ """
+ pass
+
+ def loadConfiguration(self):
+ """
+ Load configuration on all DataNodes
+
+ """
+ pass
+
+ def setSystemStatus(self, status):
+ """
+ Set system status on DataNodes
+
+ Parameters:
+ - status
+
+ """
+ pass
+
+ def setDataNodeStatus(self, req):
+ """
+ TestOnly. Set the target DataNode to the specified status
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def migrateRegion(self, req):
+ """
+ Migrate a region replica from one DataNode to another
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def killQuery(self, queryId, dataNodeId):
+ """
+ Kill query
+
+ Parameters:
+ - queryId
+ - dataNodeId
+
+ """
+ pass
+
+ def getRunningDataNodeLocations(self):
+ """
+ Get all DataNodeLocations of Running DataNodes
+
+ """
+ pass
+
+ def showCluster(self):
+ """
+ Show cluster ConfigNodes' and DataNodes' information
+
+ """
+ pass
+
+ def showVariables(self):
+ """
+ Show variables which should be consistent in the same cluster
+
+ """
+ pass
+
+ def showDataNodes(self):
+ """
+ Show cluster DataNodes' information
+
+ """
+ pass
+
+ def showConfigNodes(self):
+ """
+ Show cluster ConfigNodes' information
+
+ """
+ pass
+
+ def showStorageGroup(self, storageGroupPathPattern):
+ """
+ Show cluster StorageGroups' information
+
+ Parameters:
+ - storageGroupPathPattern
+
+ """
+ pass
+
+ def showRegion(self, req):
+ """
+ Show the matched cluster Regions' information
+ See https://apache-iotdb.feishu.cn/docx/doxcnOzmIlaE2MX5tKjmYWuMSRg for detailed matching rules
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getLatestRegionRouteMap(self):
+ """
+ The ConfigNode-leader will generate and return a latest RegionRouteMap
+
+ """
+ pass
+
+ def createSchemaTemplate(self, req):
+ """
+ Create schema template
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getAllTemplates(self):
+ """
+ Get all schema template info and template set info for DataNode registration
+
+ """
+ pass
+
+ def getTemplate(self, req):
+ """
+ Get one schema template info
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def setSchemaTemplate(self, req):
+ """
+ Set given schema template to given path
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getPathsSetTemplate(self, req):
+ """
+ Get paths setting given schema template
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def deactivateSchemaTemplate(self, req):
+ """
+ Deactivate schema template from paths matched by given pattern tree in cluster
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def unsetSchemaTemplate(self, req):
+ """
+ Unset schema template from given path
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def dropSchemaTemplate(self, req):
+ """
+ Drop schema template
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def deleteTimeSeries(self, req):
+ """
+ Generate a set of DeleteTimeSeriesProcedure to delete some specific TimeSeries
+
+ @return SUCCESS_STATUS if the DeleteTimeSeriesProcedure submitted and executed successfully
+ TIMESERIES_NOT_EXIST if the specific TimeSeries doesn't exist
+ EXECUTE_STATEMENT_ERROR if failed to submit or execute the DeleteTimeSeriesProcedure
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def createPipeSink(self, req):
+ """
+ Create PipeSink
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def dropPipeSink(self, req):
+ """
+ Drop PipeSink
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getPipeSink(self, req):
+ """
+ Get PipeSink by name, if name is empty, get all PipeSink
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def createPipe(self, req):
+ """
+ Create Pipe
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def startPipe(self, pipeName):
+ """
+ Start Pipe
+
+ Parameters:
+ - pipeName
+
+ """
+ pass
+
+ def stopPipe(self, pipeName):
+ """
+ Stop Pipe
+
+ Parameters:
+ - pipeName
+
+ """
+ pass
+
+ def dropPipe(self, pipeName):
+ """
+ Drop Pipe
+
+ Parameters:
+ - pipeName
+
+ """
+ pass
+
+ def showPipe(self, req):
+ """
+ Show Pipe by name, if name is empty, show all Pipe
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getAllPipeInfo(self):
+ pass
+
+ def recordPipeMessage(self, req):
+ """
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getRegionId(self, req):
+ """
+ Get a particular DataPartition's corresponding Regions
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getTimeSlotList(self, req):
+ """
+ Get a specific SeriesSlot's TimeSlots by start time and end time
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def getSeriesSlotList(self, req):
+ """
+ Get the given database's assigned SeriesSlots
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def createCQ(self, req):
+ """
+ Create a CQ
+
+ @return SUCCESS_STATUS if the cq was created successfully
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def dropCQ(self, req):
+ """
+ Drop a CQ
+
+ @return SUCCESS_STATUS if the CQ was removed successfully
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def showCQ(self):
+ """
+ Return the cq table of config leader
+
+ """
+ pass
+
+ def createModel(self, req):
+ """
+ Create a model
+
+ @return SUCCESS_STATUS if the model was created successfully
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def dropModel(self, req):
+ """
+ Drop a model
+
+ @return SUCCESS_STATUS if the model was removed successfully
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def showModel(self, req):
+ """
+ Return the model table
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def showTrail(self, req):
+ """
+ Return the trail table
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+ def updateModelInfo(self, req):
+ """
+ Update the model info
+
+ @return SUCCESS_STATUS if the model was removed successfully
+
+ Parameters:
+ - req
+
+ """
+ pass
+
+
+class Client(Iface):
+ def __init__(self, iprot, oprot=None):
+ self._iprot = self._oprot = iprot
+ if oprot is not None:
+ self._oprot = oprot
+ self._seqid = 0
+
+ def registerDataNode(self, req):
+ """
+ Register a new DataNode into the cluster
+
+ @return SUCCESS_STATUS if the new DataNode registered successfully
+ REJECT_NODE_START if the configuration check of the DataNode to be registered fails,
+ and a detailed error message will be returned.
+
+ Parameters:
+ - req
+
+ """
+ self.send_registerDataNode(req)
+ return self.recv_registerDataNode()
+
+ def send_registerDataNode(self, req):
+ self._oprot.writeMessageBegin('registerDataNode', TMessageType.CALL, self._seqid)
+ args = registerDataNode_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_registerDataNode(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = registerDataNode_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "registerDataNode failed: unknown result")
+
+ def restartDataNode(self, req):
+ """
+ Restart an existed DataNode
+
+ @return SUCCESS_STATUS if DataNode restart request is accepted
+ REJECT_NODE_START if the configuration check of the DataNode to be restarted fails,
+ and a detailed error message will be returned.
+
+ Parameters:
+ - req
+
+ """
+ self.send_restartDataNode(req)
+ return self.recv_restartDataNode()
+
+ def send_restartDataNode(self, req):
+ self._oprot.writeMessageBegin('restartDataNode', TMessageType.CALL, self._seqid)
+ args = restartDataNode_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_restartDataNode(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = restartDataNode_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "restartDataNode failed: unknown result")
+
+ def getSystemConfiguration(self):
+ """
+ Get system configurations. i.e. configurations that are not associated with the DataNodeId
+
+ """
+ self.send_getSystemConfiguration()
+ return self.recv_getSystemConfiguration()
+
+ def send_getSystemConfiguration(self):
+ self._oprot.writeMessageBegin('getSystemConfiguration', TMessageType.CALL, self._seqid)
+ args = getSystemConfiguration_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getSystemConfiguration(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getSystemConfiguration_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getSystemConfiguration failed: unknown result")
+
+ def removeDataNode(self, req):
+ """
+ Generate a set of DataNodeRemoveProcedure to remove some specific DataNodes from the cluster
+
+ @return SUCCESS_STATUS if the DataNodeRemoveProcedure submitted successfully
+ LACK_REPLICATION if the number of DataNodes will be too small to maintain
+ RegionReplicas after remove these DataNodes
+ DATANODE_NOT_EXIST if one of the DataNodes in the TDataNodeRemoveReq doesn't exist in the cluster
+ NODE_DELETE_FAILED_ERROR if failed to submit the DataNodeRemoveProcedure
+
+ Parameters:
+ - req
+
+ """
+ self.send_removeDataNode(req)
+ return self.recv_removeDataNode()
+
+ def send_removeDataNode(self, req):
+ self._oprot.writeMessageBegin('removeDataNode', TMessageType.CALL, self._seqid)
+ args = removeDataNode_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_removeDataNode(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = removeDataNode_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "removeDataNode failed: unknown result")
+
+ def updateDataNode(self, req):
+ """
+ Update the specified DataNode's location in the cluster when restart
+
+ @return SUCCESS_STATUS if the DataNode updated successfully
+ DATANODE_NOT_EXIST if one of the DataNodes in the TDataNodeUpdateReq doesn't exist in the cluster
+ UPDATE_DATANODE_FAILED if failed to update the DataNode
+
+ Parameters:
+ - req
+
+ """
+ self.send_updateDataNode(req)
+ return self.recv_updateDataNode()
+
+ def send_updateDataNode(self, req):
+ self._oprot.writeMessageBegin('updateDataNode', TMessageType.CALL, self._seqid)
+ args = updateDataNode_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_updateDataNode(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = updateDataNode_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "updateDataNode failed: unknown result")
+
+ def getDataNodeConfiguration(self, dataNodeId):
+ """
+ Get one or more DataNodes' configuration
+
+ @param dataNodeId, the specific DataNode's index
+ @return The specific DataNode's configuration if the DataNode exists,
+ or all DataNodes' configuration if dataNodeId is -1
+
+ Parameters:
+ - dataNodeId
+
+ """
+ self.send_getDataNodeConfiguration(dataNodeId)
+ return self.recv_getDataNodeConfiguration()
+
+ def send_getDataNodeConfiguration(self, dataNodeId):
+ self._oprot.writeMessageBegin('getDataNodeConfiguration', TMessageType.CALL, self._seqid)
+ args = getDataNodeConfiguration_args()
+ args.dataNodeId = dataNodeId
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getDataNodeConfiguration(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getDataNodeConfiguration_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getDataNodeConfiguration failed: unknown result")
+
+ def reportRegionMigrateResult(self, req):
+ """
+ Report region migration complete
+
+ Parameters:
+ - req
+
+ """
+ self.send_reportRegionMigrateResult(req)
+ return self.recv_reportRegionMigrateResult()
+
+ def send_reportRegionMigrateResult(self, req):
+ self._oprot.writeMessageBegin('reportRegionMigrateResult', TMessageType.CALL, self._seqid)
+ args = reportRegionMigrateResult_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_reportRegionMigrateResult(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = reportRegionMigrateResult_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "reportRegionMigrateResult failed: unknown result")
+
+ def setStorageGroup(self, req):
+ """
+ Set a new StorageGroup, all fields in TStorageGroupSchema can be customized
+ while the undefined fields will automatically use default values
+
+ @return SUCCESS_STATUS if the new StorageGroup set successfully
+ PATH_ILLEGAL if the new StorageGroup's name is illegal
+ STORAGE_GROUP_ALREADY_EXISTS if the StorageGroup already exist
+
+ Parameters:
+ - req
+
+ """
+ self.send_setStorageGroup(req)
+ return self.recv_setStorageGroup()
+
+ def send_setStorageGroup(self, req):
+ self._oprot.writeMessageBegin('setStorageGroup', TMessageType.CALL, self._seqid)
+ args = setStorageGroup_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_setStorageGroup(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = setStorageGroup_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "setStorageGroup failed: unknown result")
+
+ def deleteStorageGroup(self, req):
+ """
+ Generate a DeleteStorageGroupProcedure to delete a specific StorageGroup
+
+ @return SUCCESS_STATUS if the DeleteStorageGroupProcedure submitted successfully
+ TIMESERIES_NOT_EXIST if the specific StorageGroup doesn't exist
+ EXECUTE_STATEMENT_ERROR if failed to submit the DeleteStorageGroupProcedure
+
+ Parameters:
+ - req
+
+ """
+ self.send_deleteStorageGroup(req)
+ return self.recv_deleteStorageGroup()
+
+ def send_deleteStorageGroup(self, req):
+ self._oprot.writeMessageBegin('deleteStorageGroup', TMessageType.CALL, self._seqid)
+ args = deleteStorageGroup_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_deleteStorageGroup(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = deleteStorageGroup_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteStorageGroup failed: unknown result")
+
+ def deleteStorageGroups(self, req):
+ """
+ Generate a set of DeleteStorageGroupProcedure to delete some specific StorageGroups
+
+ @return SUCCESS_STATUS if the DeleteStorageGroupProcedure submitted successfully
+ TIMESERIES_NOT_EXIST if the specific StorageGroup doesn't exist
+ EXECUTE_STATEMENT_ERROR if failed to submit the DeleteStorageGroupProcedure
+
+ Parameters:
+ - req
+
+ """
+ self.send_deleteStorageGroups(req)
+ return self.recv_deleteStorageGroups()
+
+ def send_deleteStorageGroups(self, req):
+ self._oprot.writeMessageBegin('deleteStorageGroups', TMessageType.CALL, self._seqid)
+ args = deleteStorageGroups_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_deleteStorageGroups(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = deleteStorageGroups_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteStorageGroups failed: unknown result")
+
+ def setTTL(self, req):
+ """
+ Update the specific StorageGroup's TTL
+
+ Parameters:
+ - req
+
+ """
+ self.send_setTTL(req)
+ return self.recv_setTTL()
+
+ def send_setTTL(self, req):
+ self._oprot.writeMessageBegin('setTTL', TMessageType.CALL, self._seqid)
+ args = setTTL_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_setTTL(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = setTTL_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "setTTL failed: unknown result")
+
+ def setSchemaReplicationFactor(self, req):
+ """
+ Update the specific StorageGroup's SchemaReplicationFactor
+
+ Parameters:
+ - req
+
+ """
+ self.send_setSchemaReplicationFactor(req)
+ return self.recv_setSchemaReplicationFactor()
+
+ def send_setSchemaReplicationFactor(self, req):
+ self._oprot.writeMessageBegin('setSchemaReplicationFactor', TMessageType.CALL, self._seqid)
+ args = setSchemaReplicationFactor_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_setSchemaReplicationFactor(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = setSchemaReplicationFactor_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "setSchemaReplicationFactor failed: unknown result")
+
+ def setDataReplicationFactor(self, req):
+ """
+ Update the specific StorageGroup's DataReplicationFactor
+
+ Parameters:
+ - req
+
+ """
+ self.send_setDataReplicationFactor(req)
+ return self.recv_setDataReplicationFactor()
+
+ def send_setDataReplicationFactor(self, req):
+ self._oprot.writeMessageBegin('setDataReplicationFactor', TMessageType.CALL, self._seqid)
+ args = setDataReplicationFactor_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_setDataReplicationFactor(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = setDataReplicationFactor_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "setDataReplicationFactor failed: unknown result")
+
+ def setTimePartitionInterval(self, req):
+ """
+ Update the specific StorageGroup's PartitionInterval
+
+ Parameters:
+ - req
+
+ """
+ self.send_setTimePartitionInterval(req)
+ return self.recv_setTimePartitionInterval()
+
+ def send_setTimePartitionInterval(self, req):
+ self._oprot.writeMessageBegin('setTimePartitionInterval', TMessageType.CALL, self._seqid)
+ args = setTimePartitionInterval_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_setTimePartitionInterval(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = setTimePartitionInterval_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "setTimePartitionInterval failed: unknown result")
+
+ def countMatchedStorageGroups(self, storageGroupPathPattern):
+ """
+ Count the matched StorageGroups
+
+ Parameters:
+ - storageGroupPathPattern
+
+ """
+ self.send_countMatchedStorageGroups(storageGroupPathPattern)
+ return self.recv_countMatchedStorageGroups()
+
+ def send_countMatchedStorageGroups(self, storageGroupPathPattern):
+ self._oprot.writeMessageBegin('countMatchedStorageGroups', TMessageType.CALL, self._seqid)
+ args = countMatchedStorageGroups_args()
+ args.storageGroupPathPattern = storageGroupPathPattern
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_countMatchedStorageGroups(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = countMatchedStorageGroups_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "countMatchedStorageGroups failed: unknown result")
+
+ def getMatchedStorageGroupSchemas(self, storageGroupPathPattern):
+ """
+ Get the matched StorageGroups' TStorageGroupSchema
+
+ Parameters:
+ - storageGroupPathPattern
+
+ """
+ self.send_getMatchedStorageGroupSchemas(storageGroupPathPattern)
+ return self.recv_getMatchedStorageGroupSchemas()
+
+ def send_getMatchedStorageGroupSchemas(self, storageGroupPathPattern):
+ self._oprot.writeMessageBegin('getMatchedStorageGroupSchemas', TMessageType.CALL, self._seqid)
+ args = getMatchedStorageGroupSchemas_args()
+ args.storageGroupPathPattern = storageGroupPathPattern
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getMatchedStorageGroupSchemas(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getMatchedStorageGroupSchemas_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getMatchedStorageGroupSchemas failed: unknown result")
+
+ def getSchemaPartitionTable(self, req):
+ """
+ Get SchemaPartitionTable by specific PathPatternTree,
+ the returned SchemaPartitionTable will not contain the unallocated SeriesPartitionSlots
+ See https://apache-iotdb.feishu.cn/docs/doccnqe3PLPEKwsCX1xadXQ2JOg for detailed matching rules
+
+ Parameters:
+ - req
+
+ """
+ self.send_getSchemaPartitionTable(req)
+ return self.recv_getSchemaPartitionTable()
+
+ def send_getSchemaPartitionTable(self, req):
+ self._oprot.writeMessageBegin('getSchemaPartitionTable', TMessageType.CALL, self._seqid)
+ args = getSchemaPartitionTable_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getSchemaPartitionTable(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getSchemaPartitionTable_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getSchemaPartitionTable failed: unknown result")
+
+ def getOrCreateSchemaPartitionTable(self, req):
+ """
+ Get or create SchemaPartitionTable by specific PathPatternTree,
+ the returned SchemaPartitionTable always contains all the SeriesPartitionSlots
+ since the unallocated SeriesPartitionSlots will be allocated by the way
+
+ @return SUCCESS_STATUS if the SchemaPartitionTable got or created successfully
+ NOT_ENOUGH_DATA_NODE if the number of cluster DataNodes is not enough for creating new SchemaRegions
+ STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist
+
+ Parameters:
+ - req
+
+ """
+ self.send_getOrCreateSchemaPartitionTable(req)
+ return self.recv_getOrCreateSchemaPartitionTable()
+
+ def send_getOrCreateSchemaPartitionTable(self, req):
+ self._oprot.writeMessageBegin('getOrCreateSchemaPartitionTable', TMessageType.CALL, self._seqid)
+ args = getOrCreateSchemaPartitionTable_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getOrCreateSchemaPartitionTable(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getOrCreateSchemaPartitionTable_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getOrCreateSchemaPartitionTable failed: unknown result")
+
+ def getSchemaNodeManagementPartition(self, req):
+ """
+ Get the partition info used for schema node query and get the node info in ClusterSchemaInfo.
+
+ Parameters:
+ - req
+
+ """
+ self.send_getSchemaNodeManagementPartition(req)
+ return self.recv_getSchemaNodeManagementPartition()
+
+ def send_getSchemaNodeManagementPartition(self, req):
+ self._oprot.writeMessageBegin('getSchemaNodeManagementPartition', TMessageType.CALL, self._seqid)
+ args = getSchemaNodeManagementPartition_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getSchemaNodeManagementPartition(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getSchemaNodeManagementPartition_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getSchemaNodeManagementPartition failed: unknown result")
+
+ def getDataPartitionTable(self, req):
+ """
+ Get DataPartitionTable by specific PartitionSlotsMap,
+ the returned DataPartitionTable will not contain the unallocated SeriesPartitionSlots and TimePartitionSlots
+
+ Parameters:
+ - req
+
+ """
+ self.send_getDataPartitionTable(req)
+ return self.recv_getDataPartitionTable()
+
+ def send_getDataPartitionTable(self, req):
+ self._oprot.writeMessageBegin('getDataPartitionTable', TMessageType.CALL, self._seqid)
+ args = getDataPartitionTable_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getDataPartitionTable(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getDataPartitionTable_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getDataPartitionTable failed: unknown result")
+
def getOrCreateDataPartitionTable(self, req):
    """Fetch or create the DataPartitionTable for the given PartitionSlotsMap.

    The returned table always contains all requested SeriesPartitionSlots and
    TimePartitionSlots, since unallocated slots are allocated along the way.

    @return SUCCESS_STATUS if the DataPartitionTable got or created successfully
            NOT_ENOUGH_DATA_NODE if there are too few DataNodes for new DataRegions
            STORAGE_GROUP_NOT_EXIST if some StorageGroups don't exist

    Parameters:
    - req
    """
    self.send_getOrCreateDataPartitionTable(req)
    return self.recv_getOrCreateDataPartitionTable()

def send_getOrCreateDataPartitionTable(self, req):
    # Emit the request frame for 'getOrCreateDataPartitionTable'.
    out = self._oprot
    out.writeMessageBegin('getOrCreateDataPartitionTable', TMessageType.CALL, self._seqid)
    msg = getOrCreateDataPartitionTable_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getOrCreateDataPartitionTable(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getOrCreateDataPartitionTable_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getOrCreateDataPartitionTable failed: unknown result")
    return reply.success
+
def operatePermission(self, req):
    """Execute a permission write operation (create user/role, grant, ...).

    Creating users/roles needs no DataNode cache update.

    @return SUCCESS_STATUS if the write operation succeeded
            INVALIDATE_PERMISSION_CACHE_ERROR if updating the DataNode cache failed
            EXECUTE_PERMISSION_EXCEPTION_ERROR if the operation failed (e.g. missing user)
            INTERNAL_SERVER_ERROR if the permission type does not exist

    Parameters:
    - req
    """
    self.send_operatePermission(req)
    return self.recv_operatePermission()

def send_operatePermission(self, req):
    # Emit the request frame for 'operatePermission'.
    out = self._oprot
    out.writeMessageBegin('operatePermission', TMessageType.CALL, self._seqid)
    msg = operatePermission_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_operatePermission(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = operatePermission_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "operatePermission failed: unknown result")
    return reply.success
+
def queryPermission(self, req):
    """Execute a permission read operation such as listing users.

    @return SUCCESS_STATUS if the read operation succeeded
            ROLE_NOT_EXIST_ERROR if the role does not exist
            USER_NOT_EXIST_ERROR if the user does not exist
            INTERNAL_SERVER_ERROR if the permission type does not exist

    Parameters:
    - req
    """
    self.send_queryPermission(req)
    return self.recv_queryPermission()

def send_queryPermission(self, req):
    # Emit the request frame for 'queryPermission'.
    out = self._oprot
    out.writeMessageBegin('queryPermission', TMessageType.CALL, self._seqid)
    msg = queryPermission_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_queryPermission(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = queryPermission_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "queryPermission failed: unknown result")
    return reply.success
+
def login(self, req):
    """Authenticate a user login.

    @return SUCCESS_STATUS if the user exists and the credentials are correct
            WRONG_LOGIN_PASSWORD_ERROR on a wrong username or password

    Parameters:
    - req
    """
    self.send_login(req)
    return self.recv_login()

def send_login(self, req):
    # Emit the request frame for 'login'.
    out = self._oprot
    out.writeMessageBegin('login', TMessageType.CALL, self._seqid)
    msg = login_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_login(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = login_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "login failed: unknown result")
    return reply.success
+
def checkUserPrivileges(self, req):
    """Check whether the user holds the required privileges.

    @return SUCCESS_STATUS if the user has the permission
            EXECUTE_PERMISSION_EXCEPTION_ERROR if the seriesPath or privilege is illegal
            NO_PERMISSION_ERROR if the user lacks this permission

    Parameters:
    - req
    """
    self.send_checkUserPrivileges(req)
    return self.recv_checkUserPrivileges()

def send_checkUserPrivileges(self, req):
    # Emit the request frame for 'checkUserPrivileges'.
    out = self._oprot
    out.writeMessageBegin('checkUserPrivileges', TMessageType.CALL, self._seqid)
    msg = checkUserPrivileges_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_checkUserPrivileges(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = checkUserPrivileges_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "checkUserPrivileges failed: unknown result")
    return reply.success
+
def registerConfigNode(self, req):
    """Register a Non-Seed-ConfigNode with the ConfigNode-leader at first startup.

    @return SUCCESS_STATUS if the AddConfigNodeProcedure was submitted successfully
            REJECT_NODE_START if the configuration check of the ConfigNode to be
            registered fails; a detailed error message is returned.

    Parameters:
    - req
    """
    self.send_registerConfigNode(req)
    return self.recv_registerConfigNode()

def send_registerConfigNode(self, req):
    # Emit the request frame for 'registerConfigNode'.
    out = self._oprot
    out.writeMessageBegin('registerConfigNode', TMessageType.CALL, self._seqid)
    msg = registerConfigNode_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_registerConfigNode(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = registerConfigNode_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "registerConfigNode failed: unknown result")
    return reply.success
+
def addConsensusGroup(self, req):
    """Guide the Non-Seed-ConfigNode to join the ConsensusGroup at first startup.

    Parameters:
    - req
    """
    self.send_addConsensusGroup(req)
    return self.recv_addConsensusGroup()

def send_addConsensusGroup(self, req):
    # Emit the request frame for 'addConsensusGroup'.
    out = self._oprot
    out.writeMessageBegin('addConsensusGroup', TMessageType.CALL, self._seqid)
    msg = addConsensusGroup_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_addConsensusGroup(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = addConsensusGroup_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "addConsensusGroup failed: unknown result")
    return reply.success
+
def notifyRegisterSuccess(self):
    """Notify the Non-Seed-ConfigNode that the registration succeeded."""
    self.send_notifyRegisterSuccess()
    return self.recv_notifyRegisterSuccess()

def send_notifyRegisterSuccess(self):
    # Emit the request frame for 'notifyRegisterSuccess' (no arguments).
    out = self._oprot
    out.writeMessageBegin('notifyRegisterSuccess', TMessageType.CALL, self._seqid)
    msg = notifyRegisterSuccess_args()
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_notifyRegisterSuccess(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = notifyRegisterSuccess_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "notifyRegisterSuccess failed: unknown result")
    return reply.success
+
def restartConfigNode(self, req):
    """Restart an existing ConfigNode.

    @return SUCCESS_STATUS if the restart request is accepted
            REJECT_NODE_START if the configuration check of the ConfigNode to be
            restarted fails; a detailed error message is returned.

    Parameters:
    - req
    """
    self.send_restartConfigNode(req)
    return self.recv_restartConfigNode()

def send_restartConfigNode(self, req):
    # Emit the request frame for 'restartConfigNode'.
    out = self._oprot
    out.writeMessageBegin('restartConfigNode', TMessageType.CALL, self._seqid)
    msg = restartConfigNode_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_restartConfigNode(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = restartConfigNode_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "restartConfigNode failed: unknown result")
    return reply.success
+
def removeConfigNode(self, configNodeLocation):
    """Remove the given ConfigNode from the cluster.

    @return SUCCESS_STATUS if the RemoveConfigNodeProcedure was submitted successfully
            REMOVE_CONFIGNODE_FAILED if fewer than one ConfigNode would remain,
            the ConfigNode doesn't exist, or it is the leader

    Parameters:
    - configNodeLocation
    """
    self.send_removeConfigNode(configNodeLocation)
    return self.recv_removeConfigNode()

def send_removeConfigNode(self, configNodeLocation):
    # Emit the request frame for 'removeConfigNode'.
    out = self._oprot
    out.writeMessageBegin('removeConfigNode', TMessageType.CALL, self._seqid)
    msg = removeConfigNode_args()
    msg.configNodeLocation = configNodeLocation
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_removeConfigNode(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = removeConfigNode_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "removeConfigNode failed: unknown result")
    return reply.success
+
def deleteConfigNodePeer(self, configNodeLocation):
    """Ask the given ConfigNode to delete its consensus peer.

    @return SUCCESS_STATUS if the peer was deleted successfully
            REMOVE_CONFIGNODE_FAILED if the ConfigNode is not in the current
            cluster or on a Ratis internal failure

    Parameters:
    - configNodeLocation
    """
    self.send_deleteConfigNodePeer(configNodeLocation)
    return self.recv_deleteConfigNodePeer()

def send_deleteConfigNodePeer(self, configNodeLocation):
    # Emit the request frame for 'deleteConfigNodePeer'.
    out = self._oprot
    out.writeMessageBegin('deleteConfigNodePeer', TMessageType.CALL, self._seqid)
    msg = deleteConfigNodePeer_args()
    msg.configNodeLocation = configNodeLocation
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_deleteConfigNodePeer(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = deleteConfigNodePeer_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteConfigNodePeer failed: unknown result")
    return reply.success
+
def stopConfigNode(self, configNodeLocation):
    """Stop the given ConfigNode.

    Parameters:
    - configNodeLocation
    """
    self.send_stopConfigNode(configNodeLocation)
    return self.recv_stopConfigNode()

def send_stopConfigNode(self, configNodeLocation):
    # Emit the request frame for 'stopConfigNode'.
    out = self._oprot
    out.writeMessageBegin('stopConfigNode', TMessageType.CALL, self._seqid)
    msg = stopConfigNode_args()
    msg.configNodeLocation = configNodeLocation
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_stopConfigNode(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = stopConfigNode_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "stopConfigNode failed: unknown result")
    return reply.success
+
def getConfigNodeHeartBeat(self, timestamp):
    """Heartbeat ping sent periodically by the ConfigNode-leader.

    Parameters:
    - timestamp
    """
    self.send_getConfigNodeHeartBeat(timestamp)
    return self.recv_getConfigNodeHeartBeat()

def send_getConfigNodeHeartBeat(self, timestamp):
    # Emit the request frame for 'getConfigNodeHeartBeat'.
    out = self._oprot
    out.writeMessageBegin('getConfigNodeHeartBeat', TMessageType.CALL, self._seqid)
    msg = getConfigNodeHeartBeat_args()
    msg.timestamp = timestamp
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getConfigNodeHeartBeat(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getConfigNodeHeartBeat_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getConfigNodeHeartBeat failed: unknown result")
    return reply.success
+
def createFunction(self, req):
    """Create a function on all online ConfigNodes and DataNodes.

    @return SUCCESS_STATUS if the function was created successfully
            EXECUTE_STATEMENT_ERROR if the operation failed on any node

    Parameters:
    - req
    """
    self.send_createFunction(req)
    return self.recv_createFunction()

def send_createFunction(self, req):
    # Emit the request frame for 'createFunction'.
    out = self._oprot
    out.writeMessageBegin('createFunction', TMessageType.CALL, self._seqid)
    msg = createFunction_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_createFunction(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = createFunction_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "createFunction failed: unknown result")
    return reply.success
+
def dropFunction(self, req):
    """Remove a function on all online ConfigNodes and DataNodes.

    @return SUCCESS_STATUS if the function was removed successfully
            EXECUTE_STATEMENT_ERROR if the operation failed on any node

    Parameters:
    - req
    """
    self.send_dropFunction(req)
    return self.recv_dropFunction()

def send_dropFunction(self, req):
    # Emit the request frame for 'dropFunction'.
    out = self._oprot
    out.writeMessageBegin('dropFunction', TMessageType.CALL, self._seqid)
    msg = dropFunction_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_dropFunction(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = dropFunction_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "dropFunction failed: unknown result")
    return reply.success
+
def getUDFTable(self):
    """Return the UDF table."""
    self.send_getUDFTable()
    return self.recv_getUDFTable()

def send_getUDFTable(self):
    # Emit the request frame for 'getUDFTable' (no arguments).
    out = self._oprot
    out.writeMessageBegin('getUDFTable', TMessageType.CALL, self._seqid)
    msg = getUDFTable_args()
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getUDFTable(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getUDFTable_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getUDFTable failed: unknown result")
    return reply.success
+
def getUDFJar(self, req):
    """Return the UDF jar list for the given jar name list.

    Parameters:
    - req
    """
    self.send_getUDFJar(req)
    return self.recv_getUDFJar()

def send_getUDFJar(self, req):
    # Emit the request frame for 'getUDFJar'.
    out = self._oprot
    out.writeMessageBegin('getUDFJar', TMessageType.CALL, self._seqid)
    msg = getUDFJar_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getUDFJar(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getUDFJar_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getUDFJar failed: unknown result")
    return reply.success
+
def createTrigger(self, req):
    """Create a stateless trigger on all online DataNodes, or a stateful trigger
    on a specific DataNode, and sync its information to all ConfigNodes.

    @return SUCCESS_STATUS if the trigger was created successfully
            EXECUTE_STATEMENT_ERROR if the operation failed on any node

    Parameters:
    - req
    """
    self.send_createTrigger(req)
    return self.recv_createTrigger()

def send_createTrigger(self, req):
    # Emit the request frame for 'createTrigger'.
    out = self._oprot
    out.writeMessageBegin('createTrigger', TMessageType.CALL, self._seqid)
    msg = createTrigger_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_createTrigger(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = createTrigger_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "createTrigger failed: unknown result")
    return reply.success
+
def dropTrigger(self, req):
    """Remove a trigger on all online ConfigNodes and DataNodes.

    @return SUCCESS_STATUS if the trigger was removed successfully
            EXECUTE_STATEMENT_ERROR if the operation failed on any node

    Parameters:
    - req
    """
    self.send_dropTrigger(req)
    return self.recv_dropTrigger()

def send_dropTrigger(self, req):
    # Emit the request frame for 'dropTrigger'.
    out = self._oprot
    out.writeMessageBegin('dropTrigger', TMessageType.CALL, self._seqid)
    msg = dropTrigger_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_dropTrigger(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = dropTrigger_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "dropTrigger failed: unknown result")
    return reply.success
+
def getLocationOfStatefulTrigger(self, triggerName):
    """Return the TDataNodeLocation hosting the named stateful trigger.

    Parameters:
    - triggerName
    """
    self.send_getLocationOfStatefulTrigger(triggerName)
    return self.recv_getLocationOfStatefulTrigger()

def send_getLocationOfStatefulTrigger(self, triggerName):
    # Emit the request frame for 'getLocationOfStatefulTrigger'.
    out = self._oprot
    out.writeMessageBegin('getLocationOfStatefulTrigger', TMessageType.CALL, self._seqid)
    msg = getLocationOfStatefulTrigger_args()
    msg.triggerName = triggerName
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getLocationOfStatefulTrigger(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getLocationOfStatefulTrigger_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getLocationOfStatefulTrigger failed: unknown result")
    return reply.success
+
def getTriggerTable(self):
    """Return the trigger table."""
    self.send_getTriggerTable()
    return self.recv_getTriggerTable()

def send_getTriggerTable(self):
    # Emit the request frame for 'getTriggerTable' (no arguments).
    out = self._oprot
    out.writeMessageBegin('getTriggerTable', TMessageType.CALL, self._seqid)
    msg = getTriggerTable_args()
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getTriggerTable(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getTriggerTable_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getTriggerTable failed: unknown result")
    return reply.success
+
def getStatefulTriggerTable(self):
    """Return the stateful trigger table."""
    self.send_getStatefulTriggerTable()
    return self.recv_getStatefulTriggerTable()

def send_getStatefulTriggerTable(self):
    # Emit the request frame for 'getStatefulTriggerTable' (no arguments).
    out = self._oprot
    out.writeMessageBegin('getStatefulTriggerTable', TMessageType.CALL, self._seqid)
    msg = getStatefulTriggerTable_args()
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getStatefulTriggerTable(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getStatefulTriggerTable_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatefulTriggerTable failed: unknown result")
    return reply.success
+
def getTriggerJar(self, req):
    """Return the trigger jar list for the given trigger name list.

    Parameters:
    - req
    """
    self.send_getTriggerJar(req)
    return self.recv_getTriggerJar()

def send_getTriggerJar(self, req):
    # Emit the request frame for 'getTriggerJar'.
    out = self._oprot
    out.writeMessageBegin('getTriggerJar', TMessageType.CALL, self._seqid)
    msg = getTriggerJar_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_getTriggerJar(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = getTriggerJar_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getTriggerJar failed: unknown result")
    return reply.success
+
def merge(self):
    """Run level compaction and unsequence compaction tasks on all DataNodes."""
    self.send_merge()
    return self.recv_merge()

def send_merge(self):
    # Emit the request frame for 'merge' (no arguments).
    out = self._oprot
    out.writeMessageBegin('merge', TMessageType.CALL, self._seqid)
    msg = merge_args()
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_merge(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = merge_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "merge failed: unknown result")
    return reply.success
+
def flush(self, req):
    """Persist the database's in-memory data points to disk and seal the data
    files on all DataNodes.

    Parameters:
    - req
    """
    self.send_flush(req)
    return self.recv_flush()

def send_flush(self, req):
    # Emit the request frame for 'flush'.
    out = self._oprot
    out.writeMessageBegin('flush', TMessageType.CALL, self._seqid)
    msg = flush_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_flush(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = flush_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "flush failed: unknown result")
    return reply.success
+
def clearCache(self):
    """Clear chunk, chunk-metadata and timeseries-metadata caches on all
    DataNodes to release their memory footprint."""
    self.send_clearCache()
    return self.recv_clearCache()

def send_clearCache(self):
    # Emit the request frame for 'clearCache' (no arguments).
    out = self._oprot
    out.writeMessageBegin('clearCache', TMessageType.CALL, self._seqid)
    msg = clearCache_args()
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_clearCache(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = clearCache_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "clearCache failed: unknown result")
    return reply.success
+
def loadConfiguration(self):
    """Reload configuration on all DataNodes."""
    self.send_loadConfiguration()
    return self.recv_loadConfiguration()

def send_loadConfiguration(self):
    # Emit the request frame for 'loadConfiguration' (no arguments).
    out = self._oprot
    out.writeMessageBegin('loadConfiguration', TMessageType.CALL, self._seqid)
    msg = loadConfiguration_args()
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_loadConfiguration(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = loadConfiguration_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "loadConfiguration failed: unknown result")
    return reply.success
+
def setSystemStatus(self, status):
    """Set the system status on DataNodes.

    Parameters:
    - status
    """
    self.send_setSystemStatus(status)
    return self.recv_setSystemStatus()

def send_setSystemStatus(self, status):
    # Emit the request frame for 'setSystemStatus'.
    out = self._oprot
    out.writeMessageBegin('setSystemStatus', TMessageType.CALL, self._seqid)
    msg = setSystemStatus_args()
    msg.status = status
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_setSystemStatus(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = setSystemStatus_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "setSystemStatus failed: unknown result")
    return reply.success
+
def setDataNodeStatus(self, req):
    """Test only: force the target DataNode into the specified status.

    Parameters:
    - req
    """
    self.send_setDataNodeStatus(req)
    return self.recv_setDataNodeStatus()

def send_setDataNodeStatus(self, req):
    # Emit the request frame for 'setDataNodeStatus'.
    out = self._oprot
    out.writeMessageBegin('setDataNodeStatus', TMessageType.CALL, self._seqid)
    msg = setDataNodeStatus_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_setDataNodeStatus(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = setDataNodeStatus_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "setDataNodeStatus failed: unknown result")
    return reply.success
+
def migrateRegion(self, req):
    """Migrate a region replica from one DataNode to another.

    Parameters:
    - req
    """
    self.send_migrateRegion(req)
    return self.recv_migrateRegion()

def send_migrateRegion(self, req):
    # Emit the request frame for 'migrateRegion'.
    out = self._oprot
    out.writeMessageBegin('migrateRegion', TMessageType.CALL, self._seqid)
    msg = migrateRegion_args()
    msg.req = req
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_migrateRegion(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = migrateRegion_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "migrateRegion failed: unknown result")
    return reply.success
+
def killQuery(self, queryId, dataNodeId):
    """Kill the given query on the given DataNode.

    Parameters:
    - queryId
    - dataNodeId
    """
    self.send_killQuery(queryId, dataNodeId)
    return self.recv_killQuery()

def send_killQuery(self, queryId, dataNodeId):
    # Emit the request frame for 'killQuery'.
    out = self._oprot
    out.writeMessageBegin('killQuery', TMessageType.CALL, self._seqid)
    msg = killQuery_args()
    msg.queryId = queryId
    msg.dataNodeId = dataNodeId
    msg.write(out)
    out.writeMessageEnd()
    out.trans.flush()

def recv_killQuery(self):
    # Decode the reply frame; surface remote exceptions, else return the payload.
    inp = self._iprot
    (fname, mtype, rseqid) = inp.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        err = TApplicationException()
        err.read(inp)
        inp.readMessageEnd()
        raise err
    reply = killQuery_result()
    reply.read(inp)
    inp.readMessageEnd()
    if reply.success is None:
        raise TApplicationException(TApplicationException.MISSING_RESULT, "killQuery failed: unknown result")
    return reply.success
+
+ def getRunningDataNodeLocations(self):
+ """
+ Get all DataNodeLocations of Running DataNodes
+
+ """
+ self.send_getRunningDataNodeLocations()
+ return self.recv_getRunningDataNodeLocations()
+
+ def send_getRunningDataNodeLocations(self):
+ self._oprot.writeMessageBegin('getRunningDataNodeLocations', TMessageType.CALL, self._seqid)
+ args = getRunningDataNodeLocations_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getRunningDataNodeLocations(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getRunningDataNodeLocations_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getRunningDataNodeLocations failed: unknown result")
+
+ def showCluster(self):
+ """
+ Show cluster ConfigNodes' and DataNodes' information
+
+ """
+ self.send_showCluster()
+ return self.recv_showCluster()
+
+ def send_showCluster(self):
+ self._oprot.writeMessageBegin('showCluster', TMessageType.CALL, self._seqid)
+ args = showCluster_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showCluster(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showCluster_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showCluster failed: unknown result")
+
+ def showVariables(self):
+ """
+ Show variables who should be consist in the same cluster
+
+ """
+ self.send_showVariables()
+ return self.recv_showVariables()
+
+ def send_showVariables(self):
+ self._oprot.writeMessageBegin('showVariables', TMessageType.CALL, self._seqid)
+ args = showVariables_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showVariables(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showVariables_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showVariables failed: unknown result")
+
+ def showDataNodes(self):
+ """
+ Show cluster DataNodes' information
+
+ """
+ self.send_showDataNodes()
+ return self.recv_showDataNodes()
+
+ def send_showDataNodes(self):
+ self._oprot.writeMessageBegin('showDataNodes', TMessageType.CALL, self._seqid)
+ args = showDataNodes_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showDataNodes(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showDataNodes_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showDataNodes failed: unknown result")
+
+ def showConfigNodes(self):
+ """
+ Show cluster ConfigNodes' information
+
+ """
+ self.send_showConfigNodes()
+ return self.recv_showConfigNodes()
+
+ def send_showConfigNodes(self):
+ self._oprot.writeMessageBegin('showConfigNodes', TMessageType.CALL, self._seqid)
+ args = showConfigNodes_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showConfigNodes(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showConfigNodes_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showConfigNodes failed: unknown result")
+
+ def showStorageGroup(self, storageGroupPathPattern):
+ """
+ Show cluster StorageGroups' information
+
+ Parameters:
+ - storageGroupPathPattern
+
+ """
+ self.send_showStorageGroup(storageGroupPathPattern)
+ return self.recv_showStorageGroup()
+
+ def send_showStorageGroup(self, storageGroupPathPattern):
+ self._oprot.writeMessageBegin('showStorageGroup', TMessageType.CALL, self._seqid)
+ args = showStorageGroup_args()
+ args.storageGroupPathPattern = storageGroupPathPattern
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showStorageGroup(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showStorageGroup_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showStorageGroup failed: unknown result")
+
+ def showRegion(self, req):
+ """
+ Show the matched cluster Regions' information
+ See https://apache-iotdb.feishu.cn/docx/doxcnOzmIlaE2MX5tKjmYWuMSRg for detailed matching rules
+
+ Parameters:
+ - req
+
+ """
+ self.send_showRegion(req)
+ return self.recv_showRegion()
+
+ def send_showRegion(self, req):
+ self._oprot.writeMessageBegin('showRegion', TMessageType.CALL, self._seqid)
+ args = showRegion_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showRegion(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showRegion_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showRegion failed: unknown result")
+
+ def getLatestRegionRouteMap(self):
+ """
+ The ConfigNode-leader will generate and return a latest RegionRouteMap
+
+ """
+ self.send_getLatestRegionRouteMap()
+ return self.recv_getLatestRegionRouteMap()
+
+ def send_getLatestRegionRouteMap(self):
+ self._oprot.writeMessageBegin('getLatestRegionRouteMap', TMessageType.CALL, self._seqid)
+ args = getLatestRegionRouteMap_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getLatestRegionRouteMap(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getLatestRegionRouteMap_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getLatestRegionRouteMap failed: unknown result")
+
+ def createSchemaTemplate(self, req):
+ """
+ Create schema template
+
+ Parameters:
+ - req
+
+ """
+ self.send_createSchemaTemplate(req)
+ return self.recv_createSchemaTemplate()
+
+ def send_createSchemaTemplate(self, req):
+ self._oprot.writeMessageBegin('createSchemaTemplate', TMessageType.CALL, self._seqid)
+ args = createSchemaTemplate_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_createSchemaTemplate(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = createSchemaTemplate_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "createSchemaTemplate failed: unknown result")
+
+ def getAllTemplates(self):
+ """
+ Get all schema template info and template set info for DataNode registeration
+
+ """
+ self.send_getAllTemplates()
+ return self.recv_getAllTemplates()
+
+ def send_getAllTemplates(self):
+ self._oprot.writeMessageBegin('getAllTemplates', TMessageType.CALL, self._seqid)
+ args = getAllTemplates_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getAllTemplates(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getAllTemplates_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getAllTemplates failed: unknown result")
+
+ def getTemplate(self, req):
+ """
+ Get one schema template info
+
+ Parameters:
+ - req
+
+ """
+ self.send_getTemplate(req)
+ return self.recv_getTemplate()
+
+ def send_getTemplate(self, req):
+ self._oprot.writeMessageBegin('getTemplate', TMessageType.CALL, self._seqid)
+ args = getTemplate_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getTemplate(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getTemplate_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getTemplate failed: unknown result")
+
+ def setSchemaTemplate(self, req):
+ """
+ Set given schema template to given path
+
+ Parameters:
+ - req
+
+ """
+ self.send_setSchemaTemplate(req)
+ return self.recv_setSchemaTemplate()
+
+ def send_setSchemaTemplate(self, req):
+ self._oprot.writeMessageBegin('setSchemaTemplate', TMessageType.CALL, self._seqid)
+ args = setSchemaTemplate_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_setSchemaTemplate(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = setSchemaTemplate_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "setSchemaTemplate failed: unknown result")
+
+ def getPathsSetTemplate(self, req):
+ """
+ Get paths setting given schema template
+
+ Parameters:
+ - req
+
+ """
+ self.send_getPathsSetTemplate(req)
+ return self.recv_getPathsSetTemplate()
+
+ def send_getPathsSetTemplate(self, req):
+ self._oprot.writeMessageBegin('getPathsSetTemplate', TMessageType.CALL, self._seqid)
+ args = getPathsSetTemplate_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getPathsSetTemplate(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getPathsSetTemplate_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getPathsSetTemplate failed: unknown result")
+
+ def deactivateSchemaTemplate(self, req):
+ """
+ Deactivate schema template from paths matched by given pattern tree in cluster
+
+ Parameters:
+ - req
+
+ """
+ self.send_deactivateSchemaTemplate(req)
+ return self.recv_deactivateSchemaTemplate()
+
+ def send_deactivateSchemaTemplate(self, req):
+ self._oprot.writeMessageBegin('deactivateSchemaTemplate', TMessageType.CALL, self._seqid)
+ args = deactivateSchemaTemplate_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_deactivateSchemaTemplate(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = deactivateSchemaTemplate_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "deactivateSchemaTemplate failed: unknown result")
+
+ def unsetSchemaTemplate(self, req):
+ """
+ Unset schema template from given path
+
+ Parameters:
+ - req
+
+ """
+ self.send_unsetSchemaTemplate(req)
+ return self.recv_unsetSchemaTemplate()
+
+ def send_unsetSchemaTemplate(self, req):
+ self._oprot.writeMessageBegin('unsetSchemaTemplate', TMessageType.CALL, self._seqid)
+ args = unsetSchemaTemplate_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_unsetSchemaTemplate(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = unsetSchemaTemplate_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "unsetSchemaTemplate failed: unknown result")
+
+ def dropSchemaTemplate(self, req):
+ """
+ Drop schema template
+
+ Parameters:
+ - req
+
+ """
+ self.send_dropSchemaTemplate(req)
+ return self.recv_dropSchemaTemplate()
+
+ def send_dropSchemaTemplate(self, req):
+ self._oprot.writeMessageBegin('dropSchemaTemplate', TMessageType.CALL, self._seqid)
+ args = dropSchemaTemplate_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_dropSchemaTemplate(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = dropSchemaTemplate_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "dropSchemaTemplate failed: unknown result")
+
+ def deleteTimeSeries(self, req):
+ """
+ Generate a set of DeleteTimeSeriesProcedure to delete some specific TimeSeries
+
+ @return SUCCESS_STATUS if the DeleteTimeSeriesProcedure submitted and executed successfully
+ TIMESERIES_NOT_EXIST if the specific TimeSeries doesn't exist
+ EXECUTE_STATEMENT_ERROR if failed to submit or execute the DeleteTimeSeriesProcedure
+
+ Parameters:
+ - req
+
+ """
+ self.send_deleteTimeSeries(req)
+ return self.recv_deleteTimeSeries()
+
+ def send_deleteTimeSeries(self, req):
+ self._oprot.writeMessageBegin('deleteTimeSeries', TMessageType.CALL, self._seqid)
+ args = deleteTimeSeries_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_deleteTimeSeries(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = deleteTimeSeries_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteTimeSeries failed: unknown result")
+
+ def createPipeSink(self, req):
+ """
+ Create PipeSink
+
+ Parameters:
+ - req
+
+ """
+ self.send_createPipeSink(req)
+ return self.recv_createPipeSink()
+
+ def send_createPipeSink(self, req):
+ self._oprot.writeMessageBegin('createPipeSink', TMessageType.CALL, self._seqid)
+ args = createPipeSink_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_createPipeSink(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = createPipeSink_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "createPipeSink failed: unknown result")
+
+ def dropPipeSink(self, req):
+ """
+ Drop PipeSink
+
+ Parameters:
+ - req
+
+ """
+ self.send_dropPipeSink(req)
+ return self.recv_dropPipeSink()
+
+ def send_dropPipeSink(self, req):
+ self._oprot.writeMessageBegin('dropPipeSink', TMessageType.CALL, self._seqid)
+ args = dropPipeSink_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_dropPipeSink(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = dropPipeSink_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "dropPipeSink failed: unknown result")
+
+ def getPipeSink(self, req):
+ """
+ Get PipeSink by name, if name is empty, get all PipeSink
+
+ Parameters:
+ - req
+
+ """
+ self.send_getPipeSink(req)
+ return self.recv_getPipeSink()
+
+ def send_getPipeSink(self, req):
+ self._oprot.writeMessageBegin('getPipeSink', TMessageType.CALL, self._seqid)
+ args = getPipeSink_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getPipeSink(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getPipeSink_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getPipeSink failed: unknown result")
+
+ def createPipe(self, req):
+ """
+ Create Pipe
+
+ Parameters:
+ - req
+
+ """
+ self.send_createPipe(req)
+ return self.recv_createPipe()
+
+ def send_createPipe(self, req):
+ self._oprot.writeMessageBegin('createPipe', TMessageType.CALL, self._seqid)
+ args = createPipe_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_createPipe(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = createPipe_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "createPipe failed: unknown result")
+
+ def startPipe(self, pipeName):
+ """
+ Start Pipe
+
+ Parameters:
+ - pipeName
+
+ """
+ self.send_startPipe(pipeName)
+ return self.recv_startPipe()
+
+ def send_startPipe(self, pipeName):
+ self._oprot.writeMessageBegin('startPipe', TMessageType.CALL, self._seqid)
+ args = startPipe_args()
+ args.pipeName = pipeName
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_startPipe(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = startPipe_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "startPipe failed: unknown result")
+
+ def stopPipe(self, pipeName):
+ """
+ Stop Pipe
+
+ Parameters:
+ - pipeName
+
+ """
+ self.send_stopPipe(pipeName)
+ return self.recv_stopPipe()
+
+ def send_stopPipe(self, pipeName):
+ self._oprot.writeMessageBegin('stopPipe', TMessageType.CALL, self._seqid)
+ args = stopPipe_args()
+ args.pipeName = pipeName
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_stopPipe(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = stopPipe_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "stopPipe failed: unknown result")
+
+ def dropPipe(self, pipeName):
+ """
+ Drop Pipe
+
+ Parameters:
+ - pipeName
+
+ """
+ self.send_dropPipe(pipeName)
+ return self.recv_dropPipe()
+
+ def send_dropPipe(self, pipeName):
+ self._oprot.writeMessageBegin('dropPipe', TMessageType.CALL, self._seqid)
+ args = dropPipe_args()
+ args.pipeName = pipeName
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_dropPipe(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = dropPipe_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "dropPipe failed: unknown result")
+
+ def showPipe(self, req):
+ """
+ Show Pipe by name, if name is empty, show all Pipe
+
+ Parameters:
+ - req
+
+ """
+ self.send_showPipe(req)
+ return self.recv_showPipe()
+
+ def send_showPipe(self, req):
+ self._oprot.writeMessageBegin('showPipe', TMessageType.CALL, self._seqid)
+ args = showPipe_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showPipe(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showPipe_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showPipe failed: unknown result")
+
+ def getAllPipeInfo(self):
+ self.send_getAllPipeInfo()
+ return self.recv_getAllPipeInfo()
+
+ def send_getAllPipeInfo(self):
+ self._oprot.writeMessageBegin('getAllPipeInfo', TMessageType.CALL, self._seqid)
+ args = getAllPipeInfo_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getAllPipeInfo(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getAllPipeInfo_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getAllPipeInfo failed: unknown result")
+
+ def recordPipeMessage(self, req):
+ """
+ Parameters:
+ - req
+
+ """
+ self.send_recordPipeMessage(req)
+ return self.recv_recordPipeMessage()
+
+ def send_recordPipeMessage(self, req):
+ self._oprot.writeMessageBegin('recordPipeMessage', TMessageType.CALL, self._seqid)
+ args = recordPipeMessage_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_recordPipeMessage(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = recordPipeMessage_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "recordPipeMessage failed: unknown result")
+
+ def getRegionId(self, req):
+ """
+ Get a particular DataPartition's corresponding Regions
+
+ Parameters:
+ - req
+
+ """
+ self.send_getRegionId(req)
+ return self.recv_getRegionId()
+
+ def send_getRegionId(self, req):
+ self._oprot.writeMessageBegin('getRegionId', TMessageType.CALL, self._seqid)
+ args = getRegionId_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getRegionId(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getRegionId_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getRegionId failed: unknown result")
+
+ def getTimeSlotList(self, req):
+ """
+ Get a specific SeriesSlot's TimeSlots by start time and end time
+
+ Parameters:
+ - req
+
+ """
+ self.send_getTimeSlotList(req)
+ return self.recv_getTimeSlotList()
+
+ def send_getTimeSlotList(self, req):
+ self._oprot.writeMessageBegin('getTimeSlotList', TMessageType.CALL, self._seqid)
+ args = getTimeSlotList_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getTimeSlotList(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getTimeSlotList_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getTimeSlotList failed: unknown result")
+
+ def getSeriesSlotList(self, req):
+ """
+ Get the given database's assigned SeriesSlots
+
+ Parameters:
+ - req
+
+ """
+ self.send_getSeriesSlotList(req)
+ return self.recv_getSeriesSlotList()
+
+ def send_getSeriesSlotList(self, req):
+ self._oprot.writeMessageBegin('getSeriesSlotList', TMessageType.CALL, self._seqid)
+ args = getSeriesSlotList_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_getSeriesSlotList(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = getSeriesSlotList_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "getSeriesSlotList failed: unknown result")
+
+ def createCQ(self, req):
+ """
+ Create a CQ
+
+ @return SUCCESS_STATUS if the cq was created successfully
+
+ Parameters:
+ - req
+
+ """
+ self.send_createCQ(req)
+ return self.recv_createCQ()
+
+ def send_createCQ(self, req):
+ self._oprot.writeMessageBegin('createCQ', TMessageType.CALL, self._seqid)
+ args = createCQ_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_createCQ(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = createCQ_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "createCQ failed: unknown result")
+
+ def dropCQ(self, req):
+ """
+ Drop a CQ
+
+ @return SUCCESS_STATUS if the CQ was removed successfully
+
+ Parameters:
+ - req
+
+ """
+ self.send_dropCQ(req)
+ return self.recv_dropCQ()
+
+ def send_dropCQ(self, req):
+ self._oprot.writeMessageBegin('dropCQ', TMessageType.CALL, self._seqid)
+ args = dropCQ_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_dropCQ(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = dropCQ_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "dropCQ failed: unknown result")
+
+ def showCQ(self):
+ """
+ Return the cq table of config leader
+
+ """
+ self.send_showCQ()
+ return self.recv_showCQ()
+
+ def send_showCQ(self):
+ self._oprot.writeMessageBegin('showCQ', TMessageType.CALL, self._seqid)
+ args = showCQ_args()
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_showCQ(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = showCQ_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "showCQ failed: unknown result")
+
+ def createModel(self, req):
+ """
+ Create a model
+
+ @return SUCCESS_STATUS if the model was created successfully
+
+ Parameters:
+ - req
+
+ """
+ self.send_createModel(req)
+ return self.recv_createModel()
+
+ def send_createModel(self, req):
+ self._oprot.writeMessageBegin('createModel', TMessageType.CALL, self._seqid)
+ args = createModel_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_createModel(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = createModel_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "createModel failed: unknown result")
+
def dropModel(self, req):
    """Drop a model.

    Per the service contract the reply is SUCCESS_STATUS when the model
    was removed successfully.

    Parameters:
     - req

    """
    self.send_dropModel(req)
    return self.recv_dropModel()

def send_dropModel(self, req):
    # Serialize one call frame: header, args struct, end marker, flush.
    oprot = self._oprot
    oprot.writeMessageBegin('dropModel', TMessageType.CALL, self._seqid)
    call_args = dropModel_args()
    call_args.req = req
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

def recv_dropModel(self):
    # Decode the reply frame; a transported TApplicationException is
    # re-raised locally, otherwise the result struct is unpacked.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = dropModel_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.success is not None:
        return reply.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "dropModel failed: unknown result")
+
def showModel(self, req):
    """Return the model table.

    Parameters:
     - req

    """
    self.send_showModel(req)
    return self.recv_showModel()

def send_showModel(self, req):
    # Serialize one call frame: header, args struct, end marker, flush.
    oprot = self._oprot
    oprot.writeMessageBegin('showModel', TMessageType.CALL, self._seqid)
    call_args = showModel_args()
    call_args.req = req
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

def recv_showModel(self):
    # Decode the reply frame; a transported TApplicationException is
    # re-raised locally, otherwise the result struct is unpacked.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = showModel_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.success is not None:
        return reply.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "showModel failed: unknown result")
+
def showTrail(self, req):
    """Return the trail table.

    NOTE(review): "Trail" spelling follows the service IDL identifiers
    (likely intended as "trial") — do not rename without regenerating.

    Parameters:
     - req

    """
    self.send_showTrail(req)
    return self.recv_showTrail()

def send_showTrail(self, req):
    # Serialize one call frame: header, args struct, end marker, flush.
    oprot = self._oprot
    oprot.writeMessageBegin('showTrail', TMessageType.CALL, self._seqid)
    call_args = showTrail_args()
    call_args.req = req
    call_args.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

def recv_showTrail(self):
    # Decode the reply frame; a transported TApplicationException is
    # re-raised locally, otherwise the result struct is unpacked.
    iprot = self._iprot
    fname, mtype, rseqid = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        exc = TApplicationException()
        exc.read(iprot)
        iprot.readMessageEnd()
        raise exc
    reply = showTrail_result()
    reply.read(iprot)
    iprot.readMessageEnd()
    if reply.success is not None:
        return reply.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "showTrail failed: unknown result")
+
def updateModelInfo(self, req):
    """
    Update the model info

    @return SUCCESS_STATUS if the model info was updated successfully

    Parameters:
     - req

    """
    # Fix: the generated docstring said "removed successfully" (copy-paste
    # from dropModel). The same wording fix belongs in the source .thrift
    # IDL, otherwise regeneration will reintroduce it.
    self.send_updateModelInfo(req)
    return self.recv_updateModelInfo()

def send_updateModelInfo(self, req):
    # Serialize one call frame: header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('updateModelInfo', TMessageType.CALL, self._seqid)
    args = updateModelInfo_args()
    args.req = req
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()

def recv_updateModelInfo(self):
    # Decode the reply frame; a transported TApplicationException is
    # re-raised locally, otherwise the result struct is unpacked.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = updateModelInfo_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    raise TApplicationException(TApplicationException.MISSING_RESULT, "updateModelInfo failed: unknown result")
+
+
+class Processor(Iface, TProcessor):
def __init__(self, handler):
    """Build the RPC dispatch table: one process_<name> entry per service method.

    `handler` is the user-supplied Iface implementation the process_*
    methods delegate to.
    """
    self._handler = handler
    # The dispatch table is derived from the method-name list instead of
    # 89 hand-written assignments; each wire-level method name maps to the
    # matching Processor.process_<name> function object, exactly as the
    # explicit per-line form does.
    self._processMap = {}
    for _name in (
        "registerDataNode", "restartDataNode", "getSystemConfiguration",
        "removeDataNode", "updateDataNode", "getDataNodeConfiguration",
        "reportRegionMigrateResult", "setStorageGroup", "deleteStorageGroup",
        "deleteStorageGroups", "setTTL", "setSchemaReplicationFactor",
        "setDataReplicationFactor", "setTimePartitionInterval",
        "countMatchedStorageGroups", "getMatchedStorageGroupSchemas",
        "getSchemaPartitionTable", "getOrCreateSchemaPartitionTable",
        "getSchemaNodeManagementPartition", "getDataPartitionTable",
        "getOrCreateDataPartitionTable", "operatePermission",
        "queryPermission", "login", "checkUserPrivileges",
        "registerConfigNode", "addConsensusGroup", "notifyRegisterSuccess",
        "restartConfigNode", "removeConfigNode", "deleteConfigNodePeer",
        "stopConfigNode", "getConfigNodeHeartBeat", "createFunction",
        "dropFunction", "getUDFTable", "getUDFJar", "createTrigger",
        "dropTrigger", "getLocationOfStatefulTrigger", "getTriggerTable",
        "getStatefulTriggerTable", "getTriggerJar", "merge", "flush",
        "clearCache", "loadConfiguration", "setSystemStatus",
        "setDataNodeStatus", "migrateRegion", "killQuery",
        "getRunningDataNodeLocations", "showCluster", "showVariables",
        "showDataNodes", "showConfigNodes", "showStorageGroup", "showRegion",
        "getLatestRegionRouteMap", "createSchemaTemplate", "getAllTemplates",
        "getTemplate", "setSchemaTemplate", "getPathsSetTemplate",
        "deactivateSchemaTemplate", "unsetSchemaTemplate",
        "dropSchemaTemplate", "deleteTimeSeries", "createPipeSink",
        "dropPipeSink", "getPipeSink", "createPipe", "startPipe", "stopPipe",
        "dropPipe", "showPipe", "getAllPipeInfo", "recordPipeMessage",
        "getRegionId", "getTimeSlotList", "getSeriesSlotList", "createCQ",
        "dropCQ", "showCQ", "createModel", "dropModel", "showModel",
        "showTrail", "updateModelInfo",
    ):
        self._processMap[_name] = getattr(Processor, "process_" + _name)
    self._on_message_begin = None
+
def on_message_begin(self, func):
    """Install a hook invoked with (name, message type, seqid) for every incoming frame."""
    self._on_message_begin = func
+
def process(self, iprot, oprot):
    """Read one message from iprot and dispatch it to the matching handler.

    Unknown method names are answered with an UNKNOWN_METHOD exception
    frame on oprot (and None is returned); known ones return True after
    the handler has written its reply.
    """
    # `msg_type` replaces the generated local `type`, which shadowed the builtin.
    name, msg_type, seqid = iprot.readMessageBegin()
    if self._on_message_begin:
        self._on_message_begin(name, msg_type, seqid)
    handler = self._processMap.get(name)
    if handler is None:
        # Drain the unknown payload, then reply with an exception frame.
        iprot.skip(TType.STRUCT)
        iprot.readMessageEnd()
        exc = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
        oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
        exc.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
        return
    handler(self, seqid, iprot, oprot)
    return True
+
def _process_unary(self, seqid, iprot, oprot, method, args_cls, result_cls, invoke):
    """Shared skeleton for all process_* handlers (deduplicates 28 copies).

    Reads an `args_cls` struct from iprot, calls `invoke(handler, args)`,
    and writes either the populated `result_cls` or an exception frame to
    oprot. Exception mapping is identical to the generated originals:
    transport errors propagate, TApplicationException is logged and sent
    as-is, anything else is logged and sent as INTERNAL_ERROR.
    """
    args = args_cls()
    args.read(iprot)
    iprot.readMessageEnd()
    result = result_cls()
    try:
        result.success = invoke(self._handler, args)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin(method, msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

# Each wrapper below keeps the exact generated signature and wire name;
# the lambda states how the handler is invoked (most pass args.req,
# a few pass a different field or no argument at all).

def process_registerDataNode(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "registerDataNode", registerDataNode_args,
                        registerDataNode_result, lambda h, a: h.registerDataNode(a.req))

def process_restartDataNode(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "restartDataNode", restartDataNode_args,
                        restartDataNode_result, lambda h, a: h.restartDataNode(a.req))

def process_getSystemConfiguration(self, seqid, iprot, oprot):
    # Handler takes no arguments; the args struct is still read and discarded.
    self._process_unary(seqid, iprot, oprot, "getSystemConfiguration", getSystemConfiguration_args,
                        getSystemConfiguration_result, lambda h, a: h.getSystemConfiguration())

def process_removeDataNode(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "removeDataNode", removeDataNode_args,
                        removeDataNode_result, lambda h, a: h.removeDataNode(a.req))

def process_updateDataNode(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "updateDataNode", updateDataNode_args,
                        updateDataNode_result, lambda h, a: h.updateDataNode(a.req))

def process_getDataNodeConfiguration(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "getDataNodeConfiguration", getDataNodeConfiguration_args,
                        getDataNodeConfiguration_result, lambda h, a: h.getDataNodeConfiguration(a.dataNodeId))

def process_reportRegionMigrateResult(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "reportRegionMigrateResult", reportRegionMigrateResult_args,
                        reportRegionMigrateResult_result, lambda h, a: h.reportRegionMigrateResult(a.req))

def process_setStorageGroup(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "setStorageGroup", setStorageGroup_args,
                        setStorageGroup_result, lambda h, a: h.setStorageGroup(a.req))

def process_deleteStorageGroup(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "deleteStorageGroup", deleteStorageGroup_args,
                        deleteStorageGroup_result, lambda h, a: h.deleteStorageGroup(a.req))

def process_deleteStorageGroups(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "deleteStorageGroups", deleteStorageGroups_args,
                        deleteStorageGroups_result, lambda h, a: h.deleteStorageGroups(a.req))

def process_setTTL(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "setTTL", setTTL_args,
                        setTTL_result, lambda h, a: h.setTTL(a.req))

def process_setSchemaReplicationFactor(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "setSchemaReplicationFactor", setSchemaReplicationFactor_args,
                        setSchemaReplicationFactor_result, lambda h, a: h.setSchemaReplicationFactor(a.req))

def process_setDataReplicationFactor(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "setDataReplicationFactor", setDataReplicationFactor_args,
                        setDataReplicationFactor_result, lambda h, a: h.setDataReplicationFactor(a.req))

def process_setTimePartitionInterval(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "setTimePartitionInterval", setTimePartitionInterval_args,
                        setTimePartitionInterval_result, lambda h, a: h.setTimePartitionInterval(a.req))

def process_countMatchedStorageGroups(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "countMatchedStorageGroups", countMatchedStorageGroups_args,
                        countMatchedStorageGroups_result,
                        lambda h, a: h.countMatchedStorageGroups(a.storageGroupPathPattern))

def process_getMatchedStorageGroupSchemas(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "getMatchedStorageGroupSchemas", getMatchedStorageGroupSchemas_args,
                        getMatchedStorageGroupSchemas_result,
                        lambda h, a: h.getMatchedStorageGroupSchemas(a.storageGroupPathPattern))

def process_getSchemaPartitionTable(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "getSchemaPartitionTable", getSchemaPartitionTable_args,
                        getSchemaPartitionTable_result, lambda h, a: h.getSchemaPartitionTable(a.req))

def process_getOrCreateSchemaPartitionTable(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "getOrCreateSchemaPartitionTable", getOrCreateSchemaPartitionTable_args,
                        getOrCreateSchemaPartitionTable_result, lambda h, a: h.getOrCreateSchemaPartitionTable(a.req))

def process_getSchemaNodeManagementPartition(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "getSchemaNodeManagementPartition", getSchemaNodeManagementPartition_args,
                        getSchemaNodeManagementPartition_result, lambda h, a: h.getSchemaNodeManagementPartition(a.req))

def process_getDataPartitionTable(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "getDataPartitionTable", getDataPartitionTable_args,
                        getDataPartitionTable_result, lambda h, a: h.getDataPartitionTable(a.req))

def process_getOrCreateDataPartitionTable(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "getOrCreateDataPartitionTable", getOrCreateDataPartitionTable_args,
                        getOrCreateDataPartitionTable_result, lambda h, a: h.getOrCreateDataPartitionTable(a.req))

def process_operatePermission(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "operatePermission", operatePermission_args,
                        operatePermission_result, lambda h, a: h.operatePermission(a.req))

def process_queryPermission(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "queryPermission", queryPermission_args,
                        queryPermission_result, lambda h, a: h.queryPermission(a.req))

def process_login(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "login", login_args,
                        login_result, lambda h, a: h.login(a.req))

def process_checkUserPrivileges(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "checkUserPrivileges", checkUserPrivileges_args,
                        checkUserPrivileges_result, lambda h, a: h.checkUserPrivileges(a.req))

def process_registerConfigNode(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "registerConfigNode", registerConfigNode_args,
                        registerConfigNode_result, lambda h, a: h.registerConfigNode(a.req))

def process_addConsensusGroup(self, seqid, iprot, oprot):
    self._process_unary(seqid, iprot, oprot, "addConsensusGroup", addConsensusGroup_args,
                        addConsensusGroup_result, lambda h, a: h.addConsensusGroup(a.req))

def process_notifyRegisterSuccess(self, seqid, iprot, oprot):
    # Handler takes no arguments; the args struct is still read and discarded.
    self._process_unary(seqid, iprot, oprot, "notifyRegisterSuccess", notifyRegisterSuccess_args,
                        notifyRegisterSuccess_result, lambda h, a: h.notifyRegisterSuccess())
+
+ def process_restartConfigNode(self, seqid, iprot, oprot):
+ args = restartConfigNode_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = restartConfigNode_result()
+ try:
+ result.success = self._handler.restartConfigNode(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("restartConfigNode", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_removeConfigNode(self, seqid, iprot, oprot):
+ args = removeConfigNode_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = removeConfigNode_result()
+ try:
+ result.success = self._handler.removeConfigNode(args.configNodeLocation)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("removeConfigNode", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_deleteConfigNodePeer(self, seqid, iprot, oprot):
+ args = deleteConfigNodePeer_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = deleteConfigNodePeer_result()
+ try:
+ result.success = self._handler.deleteConfigNodePeer(args.configNodeLocation)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("deleteConfigNodePeer", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_stopConfigNode(self, seqid, iprot, oprot):
+ args = stopConfigNode_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = stopConfigNode_result()
+ try:
+ result.success = self._handler.stopConfigNode(args.configNodeLocation)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("stopConfigNode", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getConfigNodeHeartBeat(self, seqid, iprot, oprot):
+ args = getConfigNodeHeartBeat_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getConfigNodeHeartBeat_result()
+ try:
+ result.success = self._handler.getConfigNodeHeartBeat(args.timestamp)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getConfigNodeHeartBeat", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_createFunction(self, seqid, iprot, oprot):
+ args = createFunction_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = createFunction_result()
+ try:
+ result.success = self._handler.createFunction(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("createFunction", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_dropFunction(self, seqid, iprot, oprot):
+ args = dropFunction_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = dropFunction_result()
+ try:
+ result.success = self._handler.dropFunction(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("dropFunction", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getUDFTable(self, seqid, iprot, oprot):
+ args = getUDFTable_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getUDFTable_result()
+ try:
+ result.success = self._handler.getUDFTable()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getUDFTable", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getUDFJar(self, seqid, iprot, oprot):
+ args = getUDFJar_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getUDFJar_result()
+ try:
+ result.success = self._handler.getUDFJar(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getUDFJar", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_createTrigger(self, seqid, iprot, oprot):
+ args = createTrigger_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = createTrigger_result()
+ try:
+ result.success = self._handler.createTrigger(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("createTrigger", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_dropTrigger(self, seqid, iprot, oprot):
+ args = dropTrigger_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = dropTrigger_result()
+ try:
+ result.success = self._handler.dropTrigger(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("dropTrigger", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getLocationOfStatefulTrigger(self, seqid, iprot, oprot):
+ args = getLocationOfStatefulTrigger_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getLocationOfStatefulTrigger_result()
+ try:
+ result.success = self._handler.getLocationOfStatefulTrigger(args.triggerName)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getLocationOfStatefulTrigger", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getTriggerTable(self, seqid, iprot, oprot):
+ args = getTriggerTable_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getTriggerTable_result()
+ try:
+ result.success = self._handler.getTriggerTable()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getTriggerTable", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getStatefulTriggerTable(self, seqid, iprot, oprot):
+ args = getStatefulTriggerTable_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getStatefulTriggerTable_result()
+ try:
+ result.success = self._handler.getStatefulTriggerTable()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getStatefulTriggerTable", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getTriggerJar(self, seqid, iprot, oprot):
+ args = getTriggerJar_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getTriggerJar_result()
+ try:
+ result.success = self._handler.getTriggerJar(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getTriggerJar", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_merge(self, seqid, iprot, oprot):
+ args = merge_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = merge_result()
+ try:
+ result.success = self._handler.merge()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("merge", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_flush(self, seqid, iprot, oprot):
+ args = flush_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = flush_result()
+ try:
+ result.success = self._handler.flush(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("flush", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_clearCache(self, seqid, iprot, oprot):
+ args = clearCache_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = clearCache_result()
+ try:
+ result.success = self._handler.clearCache()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("clearCache", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_loadConfiguration(self, seqid, iprot, oprot):
+ args = loadConfiguration_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = loadConfiguration_result()
+ try:
+ result.success = self._handler.loadConfiguration()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("loadConfiguration", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_setSystemStatus(self, seqid, iprot, oprot):
+ args = setSystemStatus_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = setSystemStatus_result()
+ try:
+ result.success = self._handler.setSystemStatus(args.status)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("setSystemStatus", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_setDataNodeStatus(self, seqid, iprot, oprot):
+ args = setDataNodeStatus_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = setDataNodeStatus_result()
+ try:
+ result.success = self._handler.setDataNodeStatus(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("setDataNodeStatus", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_migrateRegion(self, seqid, iprot, oprot):
+ args = migrateRegion_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = migrateRegion_result()
+ try:
+ result.success = self._handler.migrateRegion(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("migrateRegion", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_killQuery(self, seqid, iprot, oprot):
+ args = killQuery_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = killQuery_result()
+ try:
+ result.success = self._handler.killQuery(args.queryId, args.dataNodeId)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("killQuery", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getRunningDataNodeLocations(self, seqid, iprot, oprot):
+ args = getRunningDataNodeLocations_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getRunningDataNodeLocations_result()
+ try:
+ result.success = self._handler.getRunningDataNodeLocations()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getRunningDataNodeLocations", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_showCluster(self, seqid, iprot, oprot):
+ args = showCluster_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = showCluster_result()
+ try:
+ result.success = self._handler.showCluster()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("showCluster", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_showVariables(self, seqid, iprot, oprot):
+ args = showVariables_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = showVariables_result()
+ try:
+ result.success = self._handler.showVariables()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("showVariables", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_showDataNodes(self, seqid, iprot, oprot):
+ args = showDataNodes_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = showDataNodes_result()
+ try:
+ result.success = self._handler.showDataNodes()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("showDataNodes", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_showConfigNodes(self, seqid, iprot, oprot):
+ args = showConfigNodes_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = showConfigNodes_result()
+ try:
+ result.success = self._handler.showConfigNodes()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("showConfigNodes", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_showStorageGroup(self, seqid, iprot, oprot):
+ args = showStorageGroup_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = showStorageGroup_result()
+ try:
+ result.success = self._handler.showStorageGroup(args.storageGroupPathPattern)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("showStorageGroup", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_showRegion(self, seqid, iprot, oprot):
+ args = showRegion_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = showRegion_result()
+ try:
+ result.success = self._handler.showRegion(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("showRegion", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getLatestRegionRouteMap(self, seqid, iprot, oprot):
+ args = getLatestRegionRouteMap_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getLatestRegionRouteMap_result()
+ try:
+ result.success = self._handler.getLatestRegionRouteMap()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getLatestRegionRouteMap", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_createSchemaTemplate(self, seqid, iprot, oprot):
+ args = createSchemaTemplate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = createSchemaTemplate_result()
+ try:
+ result.success = self._handler.createSchemaTemplate(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("createSchemaTemplate", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getAllTemplates(self, seqid, iprot, oprot):
+ args = getAllTemplates_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getAllTemplates_result()
+ try:
+ result.success = self._handler.getAllTemplates()
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getAllTemplates", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getTemplate(self, seqid, iprot, oprot):
+ args = getTemplate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getTemplate_result()
+ try:
+ result.success = self._handler.getTemplate(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getTemplate", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_setSchemaTemplate(self, seqid, iprot, oprot):
+ args = setSchemaTemplate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = setSchemaTemplate_result()
+ try:
+ result.success = self._handler.setSchemaTemplate(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("setSchemaTemplate", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_getPathsSetTemplate(self, seqid, iprot, oprot):
+ args = getPathsSetTemplate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = getPathsSetTemplate_result()
+ try:
+ result.success = self._handler.getPathsSetTemplate(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("getPathsSetTemplate", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_deactivateSchemaTemplate(self, seqid, iprot, oprot):
+ args = deactivateSchemaTemplate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = deactivateSchemaTemplate_result()
+ try:
+ result.success = self._handler.deactivateSchemaTemplate(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("deactivateSchemaTemplate", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def process_unsetSchemaTemplate(self, seqid, iprot, oprot):
+ args = unsetSchemaTemplate_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = unsetSchemaTemplate_result()
+ try:
+ result.success = self._handler.unsetSchemaTemplate(args.req)
+ msg_type = TMessageType.REPLY
+ except TTransport.TTransportException:
+ raise
+ except TApplicationException as ex:
+ logging.exception('TApplication exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = ex
+ except Exception:
+ logging.exception('Unexpected exception in handler')
+ msg_type = TMessageType.EXCEPTION
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("unsetSchemaTemplate", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_dropSchemaTemplate(self, seqid, iprot, oprot):
    """Serve one 'dropSchemaTemplate' RPC: decode args, invoke
    self._handler.dropSchemaTemplate, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = dropSchemaTemplate_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = dropSchemaTemplate_result()
    try:
        result.success = self._handler.dropSchemaTemplate(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("dropSchemaTemplate", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_deleteTimeSeries(self, seqid, iprot, oprot):
    """Serve one 'deleteTimeSeries' RPC: decode args, invoke
    self._handler.deleteTimeSeries, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = deleteTimeSeries_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = deleteTimeSeries_result()
    try:
        result.success = self._handler.deleteTimeSeries(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("deleteTimeSeries", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_createPipeSink(self, seqid, iprot, oprot):
    """Serve one 'createPipeSink' RPC: decode args, invoke
    self._handler.createPipeSink, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = createPipeSink_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = createPipeSink_result()
    try:
        result.success = self._handler.createPipeSink(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("createPipeSink", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_dropPipeSink(self, seqid, iprot, oprot):
    """Serve one 'dropPipeSink' RPC: decode args, invoke
    self._handler.dropPipeSink, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = dropPipeSink_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = dropPipeSink_result()
    try:
        result.success = self._handler.dropPipeSink(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("dropPipeSink", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_getPipeSink(self, seqid, iprot, oprot):
    """Serve one 'getPipeSink' RPC: decode args, invoke
    self._handler.getPipeSink, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = getPipeSink_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getPipeSink_result()
    try:
        result.success = self._handler.getPipeSink(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getPipeSink", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_createPipe(self, seqid, iprot, oprot):
    """Serve one 'createPipe' RPC: decode args, invoke
    self._handler.createPipe, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = createPipe_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = createPipe_result()
    try:
        result.success = self._handler.createPipe(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("createPipe", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_startPipe(self, seqid, iprot, oprot):
    """Serve one 'startPipe' RPC. Note: the handler receives the bare
    pipeName string rather than a request struct. Generated Thrift stub.
    """
    args = startPipe_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = startPipe_result()
    try:
        result.success = self._handler.startPipe(args.pipeName)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("startPipe", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_stopPipe(self, seqid, iprot, oprot):
    """Serve one 'stopPipe' RPC. Note: the handler receives the bare
    pipeName string rather than a request struct. Generated Thrift stub.
    """
    args = stopPipe_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = stopPipe_result()
    try:
        result.success = self._handler.stopPipe(args.pipeName)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("stopPipe", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_dropPipe(self, seqid, iprot, oprot):
    """Serve one 'dropPipe' RPC. Note: the handler receives the bare
    pipeName string rather than a request struct. Generated Thrift stub.
    """
    args = dropPipe_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = dropPipe_result()
    try:
        result.success = self._handler.dropPipe(args.pipeName)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("dropPipe", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_showPipe(self, seqid, iprot, oprot):
    """Serve one 'showPipe' RPC: decode args, invoke
    self._handler.showPipe, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = showPipe_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = showPipe_result()
    try:
        result.success = self._handler.showPipe(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("showPipe", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_getAllPipeInfo(self, seqid, iprot, oprot):
    """Serve one 'getAllPipeInfo' RPC. This method takes no arguments, so
    the handler is called without parameters. Generated Thrift stub.
    """
    args = getAllPipeInfo_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getAllPipeInfo_result()
    try:
        result.success = self._handler.getAllPipeInfo()
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getAllPipeInfo", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_recordPipeMessage(self, seqid, iprot, oprot):
    """Serve one 'recordPipeMessage' RPC: decode args, invoke
    self._handler.recordPipeMessage, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = recordPipeMessage_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = recordPipeMessage_result()
    try:
        result.success = self._handler.recordPipeMessage(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("recordPipeMessage", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_getRegionId(self, seqid, iprot, oprot):
    """Serve one 'getRegionId' RPC: decode args, invoke
    self._handler.getRegionId, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = getRegionId_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getRegionId_result()
    try:
        result.success = self._handler.getRegionId(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getRegionId", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_getTimeSlotList(self, seqid, iprot, oprot):
    """Serve one 'getTimeSlotList' RPC: decode args, invoke
    self._handler.getTimeSlotList, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = getTimeSlotList_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getTimeSlotList_result()
    try:
        result.success = self._handler.getTimeSlotList(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getTimeSlotList", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_getSeriesSlotList(self, seqid, iprot, oprot):
    """Serve one 'getSeriesSlotList' RPC: decode args, invoke
    self._handler.getSeriesSlotList, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = getSeriesSlotList_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getSeriesSlotList_result()
    try:
        result.success = self._handler.getSeriesSlotList(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getSeriesSlotList", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_createCQ(self, seqid, iprot, oprot):
    """Serve one 'createCQ' (continuous query) RPC: decode args, invoke
    self._handler.createCQ, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = createCQ_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = createCQ_result()
    try:
        result.success = self._handler.createCQ(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("createCQ", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_dropCQ(self, seqid, iprot, oprot):
    """Serve one 'dropCQ' (continuous query) RPC: decode args, invoke
    self._handler.dropCQ, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = dropCQ_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = dropCQ_result()
    try:
        result.success = self._handler.dropCQ(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("dropCQ", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_showCQ(self, seqid, iprot, oprot):
    """Serve one 'showCQ' RPC. This method takes no arguments, so the
    handler is called without parameters. Generated Thrift stub.
    """
    args = showCQ_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = showCQ_result()
    try:
        result.success = self._handler.showCQ()
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("showCQ", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_createModel(self, seqid, iprot, oprot):
    """Serve one 'createModel' RPC: decode args, invoke
    self._handler.createModel, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = createModel_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = createModel_result()
    try:
        result.success = self._handler.createModel(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("createModel", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_dropModel(self, seqid, iprot, oprot):
    """Serve one 'dropModel' RPC: decode args, invoke
    self._handler.dropModel, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = dropModel_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = dropModel_result()
    try:
        result.success = self._handler.dropModel(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("dropModel", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_showModel(self, seqid, iprot, oprot):
    """Serve one 'showModel' RPC: decode args, invoke
    self._handler.showModel, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = showModel_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = showModel_result()
    try:
        result.success = self._handler.showModel(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("showModel", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_showTrail(self, seqid, iprot, oprot):
    """Serve one 'showTrail' RPC: decode args, invoke
    self._handler.showTrail, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = showTrail_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = showTrail_result()
    try:
        result.success = self._handler.showTrail(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("showTrail", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
def process_updateModelInfo(self, seqid, iprot, oprot):
    """Serve one 'updateModelInfo' RPC: decode args, invoke
    self._handler.updateModelInfo, and write the REPLY
    (or EXCEPTION on handler failure). Generated Thrift dispatch stub.
    """
    args = updateModelInfo_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = updateModelInfo_result()
    try:
        result.success = self._handler.updateModelInfo(args.req)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failures propagate to the server loop.
        raise
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler errors become INTERNAL_ERROR replies.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("updateModelInfo", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
+
+# HELPER FUNCTIONS AND STRUCTURES
+
+
class registerDataNode_args(object):
    """Generated Thrift argument struct for the registerDataNode RPC.

    Attributes:
     - req

    """

    def __init__(self, req=None,):
        self.req = req

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == -1:
                if ftype == TType.STRUCT:
                    self.req = TDataNodeRegisterReq()
                    self.req.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('registerDataNode_args')
        if self.req is not None:
            oprot.writeFieldBegin('req', TType.STRUCT, -1)
            self.req.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(registerDataNode_args)
registerDataNode_args.thrift_spec = ()
+
+
class registerDataNode_result(object):
    """Generated Thrift result struct for the registerDataNode RPC.

    Attributes:
     - success

    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TDataNodeRegisterResp()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('registerDataNode_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(registerDataNode_result)
registerDataNode_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TDataNodeRegisterResp, None], None, ),  # 0
)
+
+
class restartDataNode_args(object):
    """Generated Thrift argument struct for the restartDataNode RPC.

    Attributes:
     - req

    """

    def __init__(self, req=None,):
        self.req = req

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == -1:
                if ftype == TType.STRUCT:
                    self.req = TDataNodeRestartReq()
                    self.req.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('restartDataNode_args')
        if self.req is not None:
            oprot.writeFieldBegin('req', TType.STRUCT, -1)
            self.req.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(restartDataNode_args)
restartDataNode_args.thrift_spec = ()
+
+
class restartDataNode_result(object):
    """Generated Thrift result struct for the restartDataNode RPC.

    Attributes:
     - success

    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TDataNodeRestartResp()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('restartDataNode_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(restartDataNode_result)
restartDataNode_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TDataNodeRestartResp, None], None, ),  # 0
)
+
+
class getSystemConfiguration_args(object):
    """Generated Thrift argument struct for the getSystemConfiguration RPC.

    The RPC takes no arguments, so this struct carries no fields.
    """

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                # No declared fields; skip anything present for compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getSystemConfiguration_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(getSystemConfiguration_args)
getSystemConfiguration_args.thrift_spec = (
)
+
+
class getSystemConfiguration_result(object):
    """Generated Thrift result struct for the getSystemConfiguration RPC.

    Attributes:
     - success

    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TSystemConfigurationResp()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getSystemConfiguration_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(getSystemConfiguration_result)
getSystemConfiguration_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TSystemConfigurationResp, None], None, ),  # 0
)
+
+
class removeDataNode_args(object):
    """Generated Thrift argument struct for the removeDataNode RPC.

    Attributes:
     - req

    """

    def __init__(self, req=None,):
        self.req = req

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == -1:
                if ftype == TType.STRUCT:
                    self.req = TDataNodeRemoveReq()
                    self.req.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('removeDataNode_args')
        if self.req is not None:
            oprot.writeFieldBegin('req', TType.STRUCT, -1)
            self.req.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(removeDataNode_args)
removeDataNode_args.thrift_spec = ()
+
+
class removeDataNode_result(object):
    """Generated Thrift result struct for the removeDataNode RPC.

    Attributes:
     - success

    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TDataNodeRemoveResp()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('removeDataNode_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(removeDataNode_result)
removeDataNode_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TDataNodeRemoveResp, None], None, ),  # 0
)
+
+
class updateDataNode_args(object):
    """Generated Thrift argument struct for the updateDataNode RPC.

    Attributes:
     - req

    """

    def __init__(self, req=None,):
        self.req = req

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == -1:
                if ftype == TType.STRUCT:
                    self.req = TDataNodeUpdateReq()
                    self.req.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('updateDataNode_args')
        if self.req is not None:
            oprot.writeFieldBegin('req', TType.STRUCT, -1)
            self.req.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(updateDataNode_args)
updateDataNode_args.thrift_spec = ()
+
+
class updateDataNode_result(object):
    """Generated Thrift result struct for the updateDataNode RPC.

    Attributes:
     - success

    """
    # NOTE(review): the success payload is TDataNodeRegisterResp (shared with
    # registerDataNode), per the generated thrift_spec below.

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = TDataNodeRegisterResp()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('updateDataNode_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(updateDataNode_result)
updateDataNode_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [TDataNodeRegisterResp, None], None, ),  # 0
)
+
+
+class getDataNodeConfiguration_args(object):
+ """
+ Attributes:
+ - dataNodeId
+
+ """
+
+
+ def __init__(self, dataNodeId=None,):
+ self.dataNodeId = dataNodeId
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == -1:
+ if ftype == TType.I32:
+ self.dataNodeId = iprot.readI32()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('getDataNodeConfiguration_args')
+ if self.dataNodeId is not None:
+ oprot.writeFieldBegin('dataNodeId', TType.I32, -1)
+ oprot.writeI32(self.dataNodeId)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+all_structs.append(getDataNodeConfiguration_args)
+getDataNodeConfiguration_args.thrift_spec = ()
+
+
+class getDataNodeConfiguration_result(object):
+ """
+ Attributes:
+ - success
+
+ """
+
+
+ def __init__(self, success=None,):
+ self.success = success
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.STRUCT:
+ self.success = TDataNodeConfigurationResp()
+ self.success.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('getDataNodeConfiguration_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+all_structs.append(getDataNodeConfiguration_result)
+getDataNodeConfiguration_result.thrift_spec = (
+ (0, TType.STRUCT, 'success', [TDataNodeConfigurationResp, None], None, ), # 0
+)
+
+
+class reportRegionMigrateResult_args(object):
+ """
+ Attributes:
+ - req
+
+ """
+
+
+ def __init__(self, req=None,):
+ self.req = req
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == -1:
+ if ftype == TType.STRUCT:
+ self.req = TRegionMigrateResultReportReq()
+ self.req.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('reportRegionMigrateResult_args')
+ if self.req is not None:
+ oprot.writeFieldBegin('req', TType.STRUCT, -1)
+ self.req.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+all_structs.append(reportRegionMigrateResult_args)
+reportRegionMigrateResult_args.thrift_spec = ()
+
+
+class reportRegionMigrateResult_result(object):
+ """
+ Attributes:
+ - success
+
+ """
+
+
+ def __init__(self, success=None,):
+ self.success = success
+
+ def read(self, iprot):
+ if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
+ iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.STRUCT:
+ self.success = iotdb.thrift.common.ttypes.TSStatus()
+ self.success.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot._fast_encode is not None and self.thrift_spec is not None:
+ oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+ return
+ oprot.writeStructBegin('reportRegionMigrateResult_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+all_structs.append(reportRegionMigrateResult_result)
+reportRegionMigrateResult_result.thrift_spec = (
+ (0, TType.STRUCT, 'success', [iotdb.thrift.common.ttypes.TSStatus, None], None, ), # 0
+)
+
+
+class setStorageGroup_args(object):
+ """
+ Attributes:
+ - req
+
+ """
... 40179 lines suppressed ...