You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@storm.apache.org by bo...@apache.org on 2017/04/06 17:32:53 UTC
[04/52] [partial] storm git commit: STORM-2441 Break down
'storm-core' to extract client (worker) artifacts
http://git-wip-us.apache.org/repos/asf/storm/blob/4de339a8/storm-client/src/jvm/org/apache/storm/generated/WorkerSummary.java
----------------------------------------------------------------------
diff --git a/storm-client/src/jvm/org/apache/storm/generated/WorkerSummary.java b/storm-client/src/jvm/org/apache/storm/generated/WorkerSummary.java
new file mode 100644
index 0000000..fff9373
--- /dev/null
+++ b/storm-client/src/jvm/org/apache/storm/generated/WorkerSummary.java
@@ -0,0 +1,1880 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.storm.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, WorkerSummary._Fields>, java.io.Serializable, Cloneable, Comparable<WorkerSummary> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WorkerSummary");
+
+ private static final org.apache.thrift.protocol.TField SUPERVISOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisor_id", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("host", org.apache.thrift.protocol.TType.STRING, (short)2);
+ private static final org.apache.thrift.protocol.TField PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("port", org.apache.thrift.protocol.TType.I32, (short)3);
+ private static final org.apache.thrift.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_id", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField TOPOLOGY_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_name", org.apache.thrift.protocol.TType.STRING, (short)5);
+ private static final org.apache.thrift.protocol.TField NUM_EXECUTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_executors", org.apache.thrift.protocol.TType.I32, (short)6);
+ private static final org.apache.thrift.protocol.TField COMPONENT_TO_NUM_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("component_to_num_tasks", org.apache.thrift.protocol.TType.MAP, (short)7);
+ private static final org.apache.thrift.protocol.TField TIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("time_secs", org.apache.thrift.protocol.TType.I32, (short)8);
+ private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime_secs", org.apache.thrift.protocol.TType.I32, (short)9);
+ private static final org.apache.thrift.protocol.TField REQUESTED_MEMONHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_memonheap", org.apache.thrift.protocol.TType.DOUBLE, (short)521);
+ private static final org.apache.thrift.protocol.TField REQUESTED_MEMOFFHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_memoffheap", org.apache.thrift.protocol.TType.DOUBLE, (short)522);
+ private static final org.apache.thrift.protocol.TField REQUESTED_CPU_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_cpu", org.apache.thrift.protocol.TType.DOUBLE, (short)523);
+ private static final org.apache.thrift.protocol.TField ASSIGNED_MEMONHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("assigned_memonheap", org.apache.thrift.protocol.TType.DOUBLE, (short)524);
+ private static final org.apache.thrift.protocol.TField ASSIGNED_MEMOFFHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("assigned_memoffheap", org.apache.thrift.protocol.TType.DOUBLE, (short)525);
+ private static final org.apache.thrift.protocol.TField ASSIGNED_CPU_FIELD_DESC = new org.apache.thrift.protocol.TField("assigned_cpu", org.apache.thrift.protocol.TType.DOUBLE, (short)526);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new WorkerSummaryStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new WorkerSummaryTupleSchemeFactory());
+ }
+
+ private String supervisor_id; // optional
+ private String host; // optional
+ private int port; // optional
+ private String topology_id; // optional
+ private String topology_name; // optional
+ private int num_executors; // optional
+ private Map<String,Long> component_to_num_tasks; // optional
+ private int time_secs; // optional
+ private int uptime_secs; // optional
+ private double requested_memonheap; // optional
+ private double requested_memoffheap; // optional
+ private double requested_cpu; // optional
+ private double assigned_memonheap; // optional
+ private double assigned_memoffheap; // optional
+ private double assigned_cpu; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    SUPERVISOR_ID((short)1, "supervisor_id"),
    HOST((short)2, "host"),
    PORT((short)3, "port"),
    TOPOLOGY_ID((short)4, "topology_id"),
    TOPOLOGY_NAME((short)5, "topology_name"),
    NUM_EXECUTORS((short)6, "num_executors"),
    COMPONENT_TO_NUM_TASKS((short)7, "component_to_num_tasks"),
    TIME_SECS((short)8, "time_secs"),
    UPTIME_SECS((short)9, "uptime_secs"),
    // NOTE(review): ids jump from 9 to 521 for the resource-metric fields;
    // the gap comes from the .thrift definition — confirm against storm.thrift.
    REQUESTED_MEMONHEAP((short)521, "requested_memonheap"),
    REQUESTED_MEMOFFHEAP((short)522, "requested_memoffheap"),
    REQUESTED_CPU((short)523, "requested_cpu"),
    ASSIGNED_MEMONHEAP((short)524, "assigned_memonheap"),
    ASSIGNED_MEMOFFHEAP((short)525, "assigned_memoffheap"),
    ASSIGNED_CPU((short)526, "assigned_cpu");

    // Lookup table from wire field name to enum constant, built once at class load.
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if it is not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // SUPERVISOR_ID
          return SUPERVISOR_ID;
        case 2: // HOST
          return HOST;
        case 3: // PORT
          return PORT;
        case 4: // TOPOLOGY_ID
          return TOPOLOGY_ID;
        case 5: // TOPOLOGY_NAME
          return TOPOLOGY_NAME;
        case 6: // NUM_EXECUTORS
          return NUM_EXECUTORS;
        case 7: // COMPONENT_TO_NUM_TASKS
          return COMPONENT_TO_NUM_TASKS;
        case 8: // TIME_SECS
          return TIME_SECS;
        case 9: // UPTIME_SECS
          return UPTIME_SECS;
        case 521: // REQUESTED_MEMONHEAP
          return REQUESTED_MEMONHEAP;
        case 522: // REQUESTED_MEMOFFHEAP
          return REQUESTED_MEMOFFHEAP;
        case 523: // REQUESTED_CPU
          return REQUESTED_CPU;
        case 524: // ASSIGNED_MEMONHEAP
          return ASSIGNED_MEMONHEAP;
        case 525: // ASSIGNED_MEMOFFHEAP
          return ASSIGNED_MEMOFFHEAP;
        case 526: // ASSIGNED_CPU
          return ASSIGNED_CPU;
        default:
          // Unknown ids are tolerated (forward compatibility with newer schemas).
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if it is not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    public short getThriftFieldId() {
      return _thriftId;
    }

    public String getFieldName() {
      return _fieldName;
    }
  }
+
+ // isset id assignments
+ private static final int __PORT_ISSET_ID = 0;
+ private static final int __NUM_EXECUTORS_ISSET_ID = 1;
+ private static final int __TIME_SECS_ISSET_ID = 2;
+ private static final int __UPTIME_SECS_ISSET_ID = 3;
+ private static final int __REQUESTED_MEMONHEAP_ISSET_ID = 4;
+ private static final int __REQUESTED_MEMOFFHEAP_ISSET_ID = 5;
+ private static final int __REQUESTED_CPU_ISSET_ID = 6;
+ private static final int __ASSIGNED_MEMONHEAP_ISSET_ID = 7;
+ private static final int __ASSIGNED_MEMOFFHEAP_ISSET_ID = 8;
+ private static final int __ASSIGNED_CPU_ISSET_ID = 9;
+ private short __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.SUPERVISOR_ID,_Fields.HOST,_Fields.PORT,_Fields.TOPOLOGY_ID,_Fields.TOPOLOGY_NAME,_Fields.NUM_EXECUTORS,_Fields.COMPONENT_TO_NUM_TASKS,_Fields.TIME_SECS,_Fields.UPTIME_SECS,_Fields.REQUESTED_MEMONHEAP,_Fields.REQUESTED_MEMOFFHEAP,_Fields.REQUESTED_CPU,_Fields.ASSIGNED_MEMONHEAP,_Fields.ASSIGNED_MEMOFFHEAP,_Fields.ASSIGNED_CPU};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.SUPERVISOR_ID, new org.apache.thrift.meta_data.FieldMetaData("supervisor_id", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.HOST, new org.apache.thrift.meta_data.FieldMetaData("host", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+ tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift.meta_data.FieldMetaData("topology_id", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TOPOLOGY_NAME, new org.apache.thrift.meta_data.FieldMetaData("topology_name", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.NUM_EXECUTORS, new org.apache.thrift.meta_data.FieldMetaData("num_executors", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+ tmpMap.put(_Fields.COMPONENT_TO_NUM_TASKS, new org.apache.thrift.meta_data.FieldMetaData("component_to_num_tasks", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+ tmpMap.put(_Fields.TIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("time_secs", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+ tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptime_secs", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+ tmpMap.put(_Fields.REQUESTED_MEMONHEAP, new org.apache.thrift.meta_data.FieldMetaData("requested_memonheap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+ tmpMap.put(_Fields.REQUESTED_MEMOFFHEAP, new org.apache.thrift.meta_data.FieldMetaData("requested_memoffheap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+ tmpMap.put(_Fields.REQUESTED_CPU, new org.apache.thrift.meta_data.FieldMetaData("requested_cpu", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+ tmpMap.put(_Fields.ASSIGNED_MEMONHEAP, new org.apache.thrift.meta_data.FieldMetaData("assigned_memonheap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+ tmpMap.put(_Fields.ASSIGNED_MEMOFFHEAP, new org.apache.thrift.meta_data.FieldMetaData("assigned_memoffheap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+ tmpMap.put(_Fields.ASSIGNED_CPU, new org.apache.thrift.meta_data.FieldMetaData("assigned_cpu", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WorkerSummary.class, metaDataMap);
+ }
+
  // Default constructor: all fields start unset (objects null, primitives 0 with
  // their __isset_bitfield bits clear).
  public WorkerSummary() {
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public WorkerSummary(WorkerSummary other) {
    // Primitive presence flags are carried over wholesale via the bitfield.
    __isset_bitfield = other.__isset_bitfield;
    if (other.is_set_supervisor_id()) {
      // Strings are immutable, so sharing the reference is a safe "deep" copy.
      this.supervisor_id = other.supervisor_id;
    }
    if (other.is_set_host()) {
      this.host = other.host;
    }
    this.port = other.port;
    if (other.is_set_topology_id()) {
      this.topology_id = other.topology_id;
    }
    if (other.is_set_topology_name()) {
      this.topology_name = other.topology_name;
    }
    this.num_executors = other.num_executors;
    if (other.is_set_component_to_num_tasks()) {
      // Copy the map container itself; String keys and Long values are immutable.
      Map<String,Long> __this__component_to_num_tasks = new HashMap<String,Long>(other.component_to_num_tasks);
      this.component_to_num_tasks = __this__component_to_num_tasks;
    }
    this.time_secs = other.time_secs;
    this.uptime_secs = other.uptime_secs;
    this.requested_memonheap = other.requested_memonheap;
    this.requested_memoffheap = other.requested_memoffheap;
    this.requested_cpu = other.requested_cpu;
    this.assigned_memonheap = other.assigned_memonheap;
    this.assigned_memoffheap = other.assigned_memoffheap;
    this.assigned_cpu = other.assigned_cpu;
  }

  // TBase deep-copy hook; delegates to the copy constructor above.
  public WorkerSummary deepCopy() {
    return new WorkerSummary(this);
  }
+
  // Resets every field to its post-construction default: object fields to null,
  // primitives to 0/0.0 with their isset bits cleared. Used by the Thrift
  // framework to recycle struct instances.
  @Override
  public void clear() {
    this.supervisor_id = null;
    this.host = null;
    set_port_isSet(false);
    this.port = 0;
    this.topology_id = null;
    this.topology_name = null;
    set_num_executors_isSet(false);
    this.num_executors = 0;
    this.component_to_num_tasks = null;
    set_time_secs_isSet(false);
    this.time_secs = 0;
    set_uptime_secs_isSet(false);
    this.uptime_secs = 0;
    set_requested_memonheap_isSet(false);
    this.requested_memonheap = 0.0;
    set_requested_memoffheap_isSet(false);
    this.requested_memoffheap = 0.0;
    set_requested_cpu_isSet(false);
    this.requested_cpu = 0.0;
    set_assigned_memonheap_isSet(false);
    this.assigned_memonheap = 0.0;
    set_assigned_memoffheap_isSet(false);
    this.assigned_memoffheap = 0.0;
    set_assigned_cpu_isSet(false);
    this.assigned_cpu = 0.0;
  }
+
  // ------------------------------------------------------------------
  // Per-field accessors. Every field is optional. Object-typed fields
  // (String, Map) represent "unset" as null; primitive fields (int,
  // double) track presence with a bit in __isset_bitfield, so their
  // getters can return stale values when the field is unset — callers
  // should consult is_set_* first.
  // ------------------------------------------------------------------

  // --- supervisor_id (field 1, optional string) ---
  public String get_supervisor_id() {
    return this.supervisor_id;
  }

  public void set_supervisor_id(String supervisor_id) {
    this.supervisor_id = supervisor_id;
  }

  public void unset_supervisor_id() {
    this.supervisor_id = null;
  }

  /** Returns true if field supervisor_id is set (has been assigned a value) and false otherwise */
  public boolean is_set_supervisor_id() {
    return this.supervisor_id != null;
  }

  // For object fields, "isSet(false)" simply nulls the field; "isSet(true)" is a no-op.
  public void set_supervisor_id_isSet(boolean value) {
    if (!value) {
      this.supervisor_id = null;
    }
  }

  // --- host (field 2, optional string) ---
  public String get_host() {
    return this.host;
  }

  public void set_host(String host) {
    this.host = host;
  }

  public void unset_host() {
    this.host = null;
  }

  /** Returns true if field host is set (has been assigned a value) and false otherwise */
  public boolean is_set_host() {
    return this.host != null;
  }

  public void set_host_isSet(boolean value) {
    if (!value) {
      this.host = null;
    }
  }

  // --- port (field 3, optional i32; presence tracked via __isset_bitfield) ---
  public int get_port() {
    return this.port;
  }

  public void set_port(int port) {
    this.port = port;
    set_port_isSet(true);
  }

  public void unset_port() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PORT_ISSET_ID);
  }

  /** Returns true if field port is set (has been assigned a value) and false otherwise */
  public boolean is_set_port() {
    return EncodingUtils.testBit(__isset_bitfield, __PORT_ISSET_ID);
  }

  public void set_port_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PORT_ISSET_ID, value);
  }

  // --- topology_id (field 4, optional string) ---
  public String get_topology_id() {
    return this.topology_id;
  }

  public void set_topology_id(String topology_id) {
    this.topology_id = topology_id;
  }

  public void unset_topology_id() {
    this.topology_id = null;
  }

  /** Returns true if field topology_id is set (has been assigned a value) and false otherwise */
  public boolean is_set_topology_id() {
    return this.topology_id != null;
  }

  public void set_topology_id_isSet(boolean value) {
    if (!value) {
      this.topology_id = null;
    }
  }

  // --- topology_name (field 5, optional string) ---
  public String get_topology_name() {
    return this.topology_name;
  }

  public void set_topology_name(String topology_name) {
    this.topology_name = topology_name;
  }

  public void unset_topology_name() {
    this.topology_name = null;
  }

  /** Returns true if field topology_name is set (has been assigned a value) and false otherwise */
  public boolean is_set_topology_name() {
    return this.topology_name != null;
  }

  public void set_topology_name_isSet(boolean value) {
    if (!value) {
      this.topology_name = null;
    }
  }

  // --- num_executors (field 6, optional i32) ---
  public int get_num_executors() {
    return this.num_executors;
  }

  public void set_num_executors(int num_executors) {
    this.num_executors = num_executors;
    set_num_executors_isSet(true);
  }

  public void unset_num_executors() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
  }

  /** Returns true if field num_executors is set (has been assigned a value) and false otherwise */
  public boolean is_set_num_executors() {
    return EncodingUtils.testBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
  }

  public void set_num_executors_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID, value);
  }

  // --- component_to_num_tasks (field 7, optional map<string,i64>) ---
  // Size helper: 0 when the map is unset.
  public int get_component_to_num_tasks_size() {
    return (this.component_to_num_tasks == null) ? 0 : this.component_to_num_tasks.size();
  }

  // Single-entry insert; lazily creates the backing map on first use.
  public void put_to_component_to_num_tasks(String key, long val) {
    if (this.component_to_num_tasks == null) {
      this.component_to_num_tasks = new HashMap<String,Long>();
    }
    this.component_to_num_tasks.put(key, val);
  }

  // Returns the live internal map (not a copy); may be null when unset.
  public Map<String,Long> get_component_to_num_tasks() {
    return this.component_to_num_tasks;
  }

  public void set_component_to_num_tasks(Map<String,Long> component_to_num_tasks) {
    this.component_to_num_tasks = component_to_num_tasks;
  }

  public void unset_component_to_num_tasks() {
    this.component_to_num_tasks = null;
  }

  /** Returns true if field component_to_num_tasks is set (has been assigned a value) and false otherwise */
  public boolean is_set_component_to_num_tasks() {
    return this.component_to_num_tasks != null;
  }

  public void set_component_to_num_tasks_isSet(boolean value) {
    if (!value) {
      this.component_to_num_tasks = null;
    }
  }

  // --- time_secs (field 8, optional i32) ---
  public int get_time_secs() {
    return this.time_secs;
  }

  public void set_time_secs(int time_secs) {
    this.time_secs = time_secs;
    set_time_secs_isSet(true);
  }

  public void unset_time_secs() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIME_SECS_ISSET_ID);
  }

  /** Returns true if field time_secs is set (has been assigned a value) and false otherwise */
  public boolean is_set_time_secs() {
    return EncodingUtils.testBit(__isset_bitfield, __TIME_SECS_ISSET_ID);
  }

  public void set_time_secs_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIME_SECS_ISSET_ID, value);
  }

  // --- uptime_secs (field 9, optional i32) ---
  public int get_uptime_secs() {
    return this.uptime_secs;
  }

  public void set_uptime_secs(int uptime_secs) {
    this.uptime_secs = uptime_secs;
    set_uptime_secs_isSet(true);
  }

  public void unset_uptime_secs() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
  }

  /** Returns true if field uptime_secs is set (has been assigned a value) and false otherwise */
  public boolean is_set_uptime_secs() {
    return EncodingUtils.testBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
  }

  public void set_uptime_secs_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID, value);
  }

  // --- requested_memonheap (field 521, optional double) ---
  public double get_requested_memonheap() {
    return this.requested_memonheap;
  }

  public void set_requested_memonheap(double requested_memonheap) {
    this.requested_memonheap = requested_memonheap;
    set_requested_memonheap_isSet(true);
  }

  public void unset_requested_memonheap() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REQUESTED_MEMONHEAP_ISSET_ID);
  }

  /** Returns true if field requested_memonheap is set (has been assigned a value) and false otherwise */
  public boolean is_set_requested_memonheap() {
    return EncodingUtils.testBit(__isset_bitfield, __REQUESTED_MEMONHEAP_ISSET_ID);
  }

  public void set_requested_memonheap_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REQUESTED_MEMONHEAP_ISSET_ID, value);
  }

  // --- requested_memoffheap (field 522, optional double) ---
  public double get_requested_memoffheap() {
    return this.requested_memoffheap;
  }

  public void set_requested_memoffheap(double requested_memoffheap) {
    this.requested_memoffheap = requested_memoffheap;
    set_requested_memoffheap_isSet(true);
  }

  public void unset_requested_memoffheap() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REQUESTED_MEMOFFHEAP_ISSET_ID);
  }

  /** Returns true if field requested_memoffheap is set (has been assigned a value) and false otherwise */
  public boolean is_set_requested_memoffheap() {
    return EncodingUtils.testBit(__isset_bitfield, __REQUESTED_MEMOFFHEAP_ISSET_ID);
  }

  public void set_requested_memoffheap_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REQUESTED_MEMOFFHEAP_ISSET_ID, value);
  }

  // --- requested_cpu (field 523, optional double) ---
  public double get_requested_cpu() {
    return this.requested_cpu;
  }

  public void set_requested_cpu(double requested_cpu) {
    this.requested_cpu = requested_cpu;
    set_requested_cpu_isSet(true);
  }

  public void unset_requested_cpu() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REQUESTED_CPU_ISSET_ID);
  }

  /** Returns true if field requested_cpu is set (has been assigned a value) and false otherwise */
  public boolean is_set_requested_cpu() {
    return EncodingUtils.testBit(__isset_bitfield, __REQUESTED_CPU_ISSET_ID);
  }

  public void set_requested_cpu_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REQUESTED_CPU_ISSET_ID, value);
  }

  // --- assigned_memonheap (field 524, optional double) ---
  public double get_assigned_memonheap() {
    return this.assigned_memonheap;
  }

  public void set_assigned_memonheap(double assigned_memonheap) {
    this.assigned_memonheap = assigned_memonheap;
    set_assigned_memonheap_isSet(true);
  }

  public void unset_assigned_memonheap() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ASSIGNED_MEMONHEAP_ISSET_ID);
  }

  /** Returns true if field assigned_memonheap is set (has been assigned a value) and false otherwise */
  public boolean is_set_assigned_memonheap() {
    return EncodingUtils.testBit(__isset_bitfield, __ASSIGNED_MEMONHEAP_ISSET_ID);
  }

  public void set_assigned_memonheap_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ASSIGNED_MEMONHEAP_ISSET_ID, value);
  }

  // --- assigned_memoffheap (field 525, optional double) ---
  public double get_assigned_memoffheap() {
    return this.assigned_memoffheap;
  }

  public void set_assigned_memoffheap(double assigned_memoffheap) {
    this.assigned_memoffheap = assigned_memoffheap;
    set_assigned_memoffheap_isSet(true);
  }

  public void unset_assigned_memoffheap() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ASSIGNED_MEMOFFHEAP_ISSET_ID);
  }

  /** Returns true if field assigned_memoffheap is set (has been assigned a value) and false otherwise */
  public boolean is_set_assigned_memoffheap() {
    return EncodingUtils.testBit(__isset_bitfield, __ASSIGNED_MEMOFFHEAP_ISSET_ID);
  }

  public void set_assigned_memoffheap_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ASSIGNED_MEMOFFHEAP_ISSET_ID, value);
  }

  // --- assigned_cpu (field 526, optional double) ---
  public double get_assigned_cpu() {
    return this.assigned_cpu;
  }

  public void set_assigned_cpu(double assigned_cpu) {
    this.assigned_cpu = assigned_cpu;
    set_assigned_cpu_isSet(true);
  }

  public void unset_assigned_cpu() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ASSIGNED_CPU_ISSET_ID);
  }

  /** Returns true if field assigned_cpu is set (has been assigned a value) and false otherwise */
  public boolean is_set_assigned_cpu() {
    return EncodingUtils.testBit(__isset_bitfield, __ASSIGNED_CPU_ISSET_ID);
  }

  public void set_assigned_cpu_isSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ASSIGNED_CPU_ISSET_ID, value);
  }
+
  // Generic setter used by the Thrift framework: a null value unsets the field;
  // a non-null value is cast to the field's declared type (primitives arrive
  // boxed). A ClassCastException on a wrong-typed value is intentional.
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case SUPERVISOR_ID:
      if (value == null) {
        unset_supervisor_id();
      } else {
        set_supervisor_id((String)value);
      }
      break;

    case HOST:
      if (value == null) {
        unset_host();
      } else {
        set_host((String)value);
      }
      break;

    case PORT:
      if (value == null) {
        unset_port();
      } else {
        set_port((Integer)value);
      }
      break;

    case TOPOLOGY_ID:
      if (value == null) {
        unset_topology_id();
      } else {
        set_topology_id((String)value);
      }
      break;

    case TOPOLOGY_NAME:
      if (value == null) {
        unset_topology_name();
      } else {
        set_topology_name((String)value);
      }
      break;

    case NUM_EXECUTORS:
      if (value == null) {
        unset_num_executors();
      } else {
        set_num_executors((Integer)value);
      }
      break;

    case COMPONENT_TO_NUM_TASKS:
      if (value == null) {
        unset_component_to_num_tasks();
      } else {
        set_component_to_num_tasks((Map<String,Long>)value);
      }
      break;

    case TIME_SECS:
      if (value == null) {
        unset_time_secs();
      } else {
        set_time_secs((Integer)value);
      }
      break;

    case UPTIME_SECS:
      if (value == null) {
        unset_uptime_secs();
      } else {
        set_uptime_secs((Integer)value);
      }
      break;

    case REQUESTED_MEMONHEAP:
      if (value == null) {
        unset_requested_memonheap();
      } else {
        set_requested_memonheap((Double)value);
      }
      break;

    case REQUESTED_MEMOFFHEAP:
      if (value == null) {
        unset_requested_memoffheap();
      } else {
        set_requested_memoffheap((Double)value);
      }
      break;

    case REQUESTED_CPU:
      if (value == null) {
        unset_requested_cpu();
      } else {
        set_requested_cpu((Double)value);
      }
      break;

    case ASSIGNED_MEMONHEAP:
      if (value == null) {
        unset_assigned_memonheap();
      } else {
        set_assigned_memonheap((Double)value);
      }
      break;

    case ASSIGNED_MEMOFFHEAP:
      if (value == null) {
        unset_assigned_memoffheap();
      } else {
        set_assigned_memoffheap((Double)value);
      }
      break;

    case ASSIGNED_CPU:
      if (value == null) {
        unset_assigned_cpu();
      } else {
        set_assigned_cpu((Double)value);
      }
      break;

    }
  }
+
  // Generic getter used by the Thrift framework. Primitive fields are returned
  // autoboxed; unset object fields come back as null. The trailing
  // IllegalStateException is unreachable for valid _Fields values and guards
  // against a null argument falling through the switch.
  public Object getFieldValue(_Fields field) {
    switch (field) {
    case SUPERVISOR_ID:
      return get_supervisor_id();

    case HOST:
      return get_host();

    case PORT:
      return get_port();

    case TOPOLOGY_ID:
      return get_topology_id();

    case TOPOLOGY_NAME:
      return get_topology_name();

    case NUM_EXECUTORS:
      return get_num_executors();

    case COMPONENT_TO_NUM_TASKS:
      return get_component_to_num_tasks();

    case TIME_SECS:
      return get_time_secs();

    case UPTIME_SECS:
      return get_uptime_secs();

    case REQUESTED_MEMONHEAP:
      return get_requested_memonheap();

    case REQUESTED_MEMOFFHEAP:
      return get_requested_memoffheap();

    case REQUESTED_CPU:
      return get_requested_cpu();

    case ASSIGNED_MEMONHEAP:
      return get_assigned_memonheap();

    case ASSIGNED_MEMOFFHEAP:
      return get_assigned_memoffheap();

    case ASSIGNED_CPU:
      return get_assigned_cpu();

    }
    throw new IllegalStateException();
  }
+
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    // Null is rejected explicitly; every enum constant is handled below, so the
    // trailing IllegalStateException is effectively unreachable.
    if (field == null) {
      throw new IllegalArgumentException();
    }

    switch (field) {
    case SUPERVISOR_ID:
      return is_set_supervisor_id();
    case HOST:
      return is_set_host();
    case PORT:
      return is_set_port();
    case TOPOLOGY_ID:
      return is_set_topology_id();
    case TOPOLOGY_NAME:
      return is_set_topology_name();
    case NUM_EXECUTORS:
      return is_set_num_executors();
    case COMPONENT_TO_NUM_TASKS:
      return is_set_component_to_num_tasks();
    case TIME_SECS:
      return is_set_time_secs();
    case UPTIME_SECS:
      return is_set_uptime_secs();
    case REQUESTED_MEMONHEAP:
      return is_set_requested_memonheap();
    case REQUESTED_MEMOFFHEAP:
      return is_set_requested_memoffheap();
    case REQUESTED_CPU:
      return is_set_requested_cpu();
    case ASSIGNED_MEMONHEAP:
      return is_set_assigned_memonheap();
    case ASSIGNED_MEMOFFHEAP:
      return is_set_assigned_memoffheap();
    case ASSIGNED_CPU:
      return is_set_assigned_cpu();
    }
    throw new IllegalStateException();
  }
+
  // Object-typed equals: delegates to the strongly-typed overload after an
  // instanceof check; any non-WorkerSummary (including null) is unequal.
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof WorkerSummary)
      return this.equals((WorkerSummary)that);
    return false;
  }
+
+ public boolean equals(WorkerSummary that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_supervisor_id = true && this.is_set_supervisor_id();
+ boolean that_present_supervisor_id = true && that.is_set_supervisor_id();
+ if (this_present_supervisor_id || that_present_supervisor_id) {
+ if (!(this_present_supervisor_id && that_present_supervisor_id))
+ return false;
+ if (!this.supervisor_id.equals(that.supervisor_id))
+ return false;
+ }
+
+ boolean this_present_host = true && this.is_set_host();
+ boolean that_present_host = true && that.is_set_host();
+ if (this_present_host || that_present_host) {
+ if (!(this_present_host && that_present_host))
+ return false;
+ if (!this.host.equals(that.host))
+ return false;
+ }
+
+ boolean this_present_port = true && this.is_set_port();
+ boolean that_present_port = true && that.is_set_port();
+ if (this_present_port || that_present_port) {
+ if (!(this_present_port && that_present_port))
+ return false;
+ if (this.port != that.port)
+ return false;
+ }
+
+ boolean this_present_topology_id = true && this.is_set_topology_id();
+ boolean that_present_topology_id = true && that.is_set_topology_id();
+ if (this_present_topology_id || that_present_topology_id) {
+ if (!(this_present_topology_id && that_present_topology_id))
+ return false;
+ if (!this.topology_id.equals(that.topology_id))
+ return false;
+ }
+
+ boolean this_present_topology_name = true && this.is_set_topology_name();
+ boolean that_present_topology_name = true && that.is_set_topology_name();
+ if (this_present_topology_name || that_present_topology_name) {
+ if (!(this_present_topology_name && that_present_topology_name))
+ return false;
+ if (!this.topology_name.equals(that.topology_name))
+ return false;
+ }
+
+ boolean this_present_num_executors = true && this.is_set_num_executors();
+ boolean that_present_num_executors = true && that.is_set_num_executors();
+ if (this_present_num_executors || that_present_num_executors) {
+ if (!(this_present_num_executors && that_present_num_executors))
+ return false;
+ if (this.num_executors != that.num_executors)
+ return false;
+ }
+
+ boolean this_present_component_to_num_tasks = true && this.is_set_component_to_num_tasks();
+ boolean that_present_component_to_num_tasks = true && that.is_set_component_to_num_tasks();
+ if (this_present_component_to_num_tasks || that_present_component_to_num_tasks) {
+ if (!(this_present_component_to_num_tasks && that_present_component_to_num_tasks))
+ return false;
+ if (!this.component_to_num_tasks.equals(that.component_to_num_tasks))
+ return false;
+ }
+
+ boolean this_present_time_secs = true && this.is_set_time_secs();
+ boolean that_present_time_secs = true && that.is_set_time_secs();
+ if (this_present_time_secs || that_present_time_secs) {
+ if (!(this_present_time_secs && that_present_time_secs))
+ return false;
+ if (this.time_secs != that.time_secs)
+ return false;
+ }
+
+ boolean this_present_uptime_secs = true && this.is_set_uptime_secs();
+ boolean that_present_uptime_secs = true && that.is_set_uptime_secs();
+ if (this_present_uptime_secs || that_present_uptime_secs) {
+ if (!(this_present_uptime_secs && that_present_uptime_secs))
+ return false;
+ if (this.uptime_secs != that.uptime_secs)
+ return false;
+ }
+
+ boolean this_present_requested_memonheap = true && this.is_set_requested_memonheap();
+ boolean that_present_requested_memonheap = true && that.is_set_requested_memonheap();
+ if (this_present_requested_memonheap || that_present_requested_memonheap) {
+ if (!(this_present_requested_memonheap && that_present_requested_memonheap))
+ return false;
+ if (this.requested_memonheap != that.requested_memonheap)
+ return false;
+ }
+
+ boolean this_present_requested_memoffheap = true && this.is_set_requested_memoffheap();
+ boolean that_present_requested_memoffheap = true && that.is_set_requested_memoffheap();
+ if (this_present_requested_memoffheap || that_present_requested_memoffheap) {
+ if (!(this_present_requested_memoffheap && that_present_requested_memoffheap))
+ return false;
+ if (this.requested_memoffheap != that.requested_memoffheap)
+ return false;
+ }
+
+ boolean this_present_requested_cpu = true && this.is_set_requested_cpu();
+ boolean that_present_requested_cpu = true && that.is_set_requested_cpu();
+ if (this_present_requested_cpu || that_present_requested_cpu) {
+ if (!(this_present_requested_cpu && that_present_requested_cpu))
+ return false;
+ if (this.requested_cpu != that.requested_cpu)
+ return false;
+ }
+
+ boolean this_present_assigned_memonheap = true && this.is_set_assigned_memonheap();
+ boolean that_present_assigned_memonheap = true && that.is_set_assigned_memonheap();
+ if (this_present_assigned_memonheap || that_present_assigned_memonheap) {
+ if (!(this_present_assigned_memonheap && that_present_assigned_memonheap))
+ return false;
+ if (this.assigned_memonheap != that.assigned_memonheap)
+ return false;
+ }
+
+ boolean this_present_assigned_memoffheap = true && this.is_set_assigned_memoffheap();
+ boolean that_present_assigned_memoffheap = true && that.is_set_assigned_memoffheap();
+ if (this_present_assigned_memoffheap || that_present_assigned_memoffheap) {
+ if (!(this_present_assigned_memoffheap && that_present_assigned_memoffheap))
+ return false;
+ if (this.assigned_memoffheap != that.assigned_memoffheap)
+ return false;
+ }
+
+ boolean this_present_assigned_cpu = true && this.is_set_assigned_cpu();
+ boolean that_present_assigned_cpu = true && that.is_set_assigned_cpu();
+ if (this_present_assigned_cpu || that_present_assigned_cpu) {
+ if (!(this_present_assigned_cpu && that_present_assigned_cpu))
+ return false;
+ if (this.assigned_cpu != that.assigned_cpu)
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_supervisor_id = true && (is_set_supervisor_id());
+ list.add(present_supervisor_id);
+ if (present_supervisor_id)
+ list.add(supervisor_id);
+
+ boolean present_host = true && (is_set_host());
+ list.add(present_host);
+ if (present_host)
+ list.add(host);
+
+ boolean present_port = true && (is_set_port());
+ list.add(present_port);
+ if (present_port)
+ list.add(port);
+
+ boolean present_topology_id = true && (is_set_topology_id());
+ list.add(present_topology_id);
+ if (present_topology_id)
+ list.add(topology_id);
+
+ boolean present_topology_name = true && (is_set_topology_name());
+ list.add(present_topology_name);
+ if (present_topology_name)
+ list.add(topology_name);
+
+ boolean present_num_executors = true && (is_set_num_executors());
+ list.add(present_num_executors);
+ if (present_num_executors)
+ list.add(num_executors);
+
+ boolean present_component_to_num_tasks = true && (is_set_component_to_num_tasks());
+ list.add(present_component_to_num_tasks);
+ if (present_component_to_num_tasks)
+ list.add(component_to_num_tasks);
+
+ boolean present_time_secs = true && (is_set_time_secs());
+ list.add(present_time_secs);
+ if (present_time_secs)
+ list.add(time_secs);
+
+ boolean present_uptime_secs = true && (is_set_uptime_secs());
+ list.add(present_uptime_secs);
+ if (present_uptime_secs)
+ list.add(uptime_secs);
+
+ boolean present_requested_memonheap = true && (is_set_requested_memonheap());
+ list.add(present_requested_memonheap);
+ if (present_requested_memonheap)
+ list.add(requested_memonheap);
+
+ boolean present_requested_memoffheap = true && (is_set_requested_memoffheap());
+ list.add(present_requested_memoffheap);
+ if (present_requested_memoffheap)
+ list.add(requested_memoffheap);
+
+ boolean present_requested_cpu = true && (is_set_requested_cpu());
+ list.add(present_requested_cpu);
+ if (present_requested_cpu)
+ list.add(requested_cpu);
+
+ boolean present_assigned_memonheap = true && (is_set_assigned_memonheap());
+ list.add(present_assigned_memonheap);
+ if (present_assigned_memonheap)
+ list.add(assigned_memonheap);
+
+ boolean present_assigned_memoffheap = true && (is_set_assigned_memoffheap());
+ list.add(present_assigned_memoffheap);
+ if (present_assigned_memoffheap)
+ list.add(assigned_memoffheap);
+
+ boolean present_assigned_cpu = true && (is_set_assigned_cpu());
+ list.add(present_assigned_cpu);
+ if (present_assigned_cpu)
+ list.add(assigned_cpu);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(WorkerSummary other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(is_set_supervisor_id()).compareTo(other.is_set_supervisor_id());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_supervisor_id()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisor_id, other.supervisor_id);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_host()).compareTo(other.is_set_host());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_host()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.host, other.host);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_port()).compareTo(other.is_set_port());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_port()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.port, other.port);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_topology_id()).compareTo(other.is_set_topology_id());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_topology_id()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_id, other.topology_id);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_topology_name()).compareTo(other.is_set_topology_name());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_topology_name()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_name, other.topology_name);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_num_executors()).compareTo(other.is_set_num_executors());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_num_executors()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_executors, other.num_executors);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_component_to_num_tasks()).compareTo(other.is_set_component_to_num_tasks());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_component_to_num_tasks()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.component_to_num_tasks, other.component_to_num_tasks);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_time_secs()).compareTo(other.is_set_time_secs());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_time_secs()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.time_secs, other.time_secs);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_uptime_secs()).compareTo(other.is_set_uptime_secs());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_uptime_secs()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptime_secs, other.uptime_secs);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_requested_memonheap()).compareTo(other.is_set_requested_memonheap());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_requested_memonheap()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requested_memonheap, other.requested_memonheap);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_requested_memoffheap()).compareTo(other.is_set_requested_memoffheap());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_requested_memoffheap()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requested_memoffheap, other.requested_memoffheap);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_requested_cpu()).compareTo(other.is_set_requested_cpu());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_requested_cpu()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requested_cpu, other.requested_cpu);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_assigned_memonheap()).compareTo(other.is_set_assigned_memonheap());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_assigned_memonheap()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.assigned_memonheap, other.assigned_memonheap);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_assigned_memoffheap()).compareTo(other.is_set_assigned_memoffheap());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_assigned_memoffheap()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.assigned_memoffheap, other.assigned_memoffheap);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(is_set_assigned_cpu()).compareTo(other.is_set_assigned_cpu());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (is_set_assigned_cpu()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.assigned_cpu, other.assigned_cpu);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("WorkerSummary(");
+ boolean first = true;
+
+ if (is_set_supervisor_id()) {
+ sb.append("supervisor_id:");
+ if (this.supervisor_id == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.supervisor_id);
+ }
+ first = false;
+ }
+ if (is_set_host()) {
+ if (!first) sb.append(", ");
+ sb.append("host:");
+ if (this.host == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.host);
+ }
+ first = false;
+ }
+ if (is_set_port()) {
+ if (!first) sb.append(", ");
+ sb.append("port:");
+ sb.append(this.port);
+ first = false;
+ }
+ if (is_set_topology_id()) {
+ if (!first) sb.append(", ");
+ sb.append("topology_id:");
+ if (this.topology_id == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.topology_id);
+ }
+ first = false;
+ }
+ if (is_set_topology_name()) {
+ if (!first) sb.append(", ");
+ sb.append("topology_name:");
+ if (this.topology_name == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.topology_name);
+ }
+ first = false;
+ }
+ if (is_set_num_executors()) {
+ if (!first) sb.append(", ");
+ sb.append("num_executors:");
+ sb.append(this.num_executors);
+ first = false;
+ }
+ if (is_set_component_to_num_tasks()) {
+ if (!first) sb.append(", ");
+ sb.append("component_to_num_tasks:");
+ if (this.component_to_num_tasks == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.component_to_num_tasks);
+ }
+ first = false;
+ }
+ if (is_set_time_secs()) {
+ if (!first) sb.append(", ");
+ sb.append("time_secs:");
+ sb.append(this.time_secs);
+ first = false;
+ }
+ if (is_set_uptime_secs()) {
+ if (!first) sb.append(", ");
+ sb.append("uptime_secs:");
+ sb.append(this.uptime_secs);
+ first = false;
+ }
+ if (is_set_requested_memonheap()) {
+ if (!first) sb.append(", ");
+ sb.append("requested_memonheap:");
+ sb.append(this.requested_memonheap);
+ first = false;
+ }
+ if (is_set_requested_memoffheap()) {
+ if (!first) sb.append(", ");
+ sb.append("requested_memoffheap:");
+ sb.append(this.requested_memoffheap);
+ first = false;
+ }
+ if (is_set_requested_cpu()) {
+ if (!first) sb.append(", ");
+ sb.append("requested_cpu:");
+ sb.append(this.requested_cpu);
+ first = false;
+ }
+ if (is_set_assigned_memonheap()) {
+ if (!first) sb.append(", ");
+ sb.append("assigned_memonheap:");
+ sb.append(this.assigned_memonheap);
+ first = false;
+ }
+ if (is_set_assigned_memoffheap()) {
+ if (!first) sb.append(", ");
+ sb.append("assigned_memoffheap:");
+ sb.append(this.assigned_memoffheap);
+ first = false;
+ }
+ if (is_set_assigned_cpu()) {
+ if (!first) sb.append(", ");
+ sb.append("assigned_cpu:");
+ sb.append(this.assigned_cpu);
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class WorkerSummaryStandardSchemeFactory implements SchemeFactory {
+ public WorkerSummaryStandardScheme getScheme() {
+ return new WorkerSummaryStandardScheme();
+ }
+ }
+
+ private static class WorkerSummaryStandardScheme extends StandardScheme<WorkerSummary> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, WorkerSummary struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // SUPERVISOR_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.supervisor_id = iprot.readString();
+ struct.set_supervisor_id_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // HOST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.host = iprot.readString();
+ struct.set_host_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 3: // PORT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.port = iprot.readI32();
+ struct.set_port_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 4: // TOPOLOGY_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.topology_id = iprot.readString();
+ struct.set_topology_id_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 5: // TOPOLOGY_NAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.topology_name = iprot.readString();
+ struct.set_topology_name_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 6: // NUM_EXECUTORS
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.num_executors = iprot.readI32();
+ struct.set_num_executors_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 7: // COMPONENT_TO_NUM_TASKS
+ if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+ {
+ org.apache.thrift.protocol.TMap _map428 = iprot.readMapBegin();
+ struct.component_to_num_tasks = new HashMap<String,Long>(2*_map428.size);
+ String _key429;
+ long _val430;
+ for (int _i431 = 0; _i431 < _map428.size; ++_i431)
+ {
+ _key429 = iprot.readString();
+ _val430 = iprot.readI64();
+ struct.component_to_num_tasks.put(_key429, _val430);
+ }
+ iprot.readMapEnd();
+ }
+ struct.set_component_to_num_tasks_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 8: // TIME_SECS
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.time_secs = iprot.readI32();
+ struct.set_time_secs_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 9: // UPTIME_SECS
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.uptime_secs = iprot.readI32();
+ struct.set_uptime_secs_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 521: // REQUESTED_MEMONHEAP
+ if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+ struct.requested_memonheap = iprot.readDouble();
+ struct.set_requested_memonheap_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 522: // REQUESTED_MEMOFFHEAP
+ if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+ struct.requested_memoffheap = iprot.readDouble();
+ struct.set_requested_memoffheap_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 523: // REQUESTED_CPU
+ if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+ struct.requested_cpu = iprot.readDouble();
+ struct.set_requested_cpu_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 524: // ASSIGNED_MEMONHEAP
+ if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+ struct.assigned_memonheap = iprot.readDouble();
+ struct.set_assigned_memonheap_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 525: // ASSIGNED_MEMOFFHEAP
+ if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+ struct.assigned_memoffheap = iprot.readDouble();
+ struct.set_assigned_memoffheap_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 526: // ASSIGNED_CPU
+ if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+ struct.assigned_cpu = iprot.readDouble();
+ struct.set_assigned_cpu_isSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, WorkerSummary struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.supervisor_id != null) {
+ if (struct.is_set_supervisor_id()) {
+ oprot.writeFieldBegin(SUPERVISOR_ID_FIELD_DESC);
+ oprot.writeString(struct.supervisor_id);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.host != null) {
+ if (struct.is_set_host()) {
+ oprot.writeFieldBegin(HOST_FIELD_DESC);
+ oprot.writeString(struct.host);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.is_set_port()) {
+ oprot.writeFieldBegin(PORT_FIELD_DESC);
+ oprot.writeI32(struct.port);
+ oprot.writeFieldEnd();
+ }
+ if (struct.topology_id != null) {
+ if (struct.is_set_topology_id()) {
+ oprot.writeFieldBegin(TOPOLOGY_ID_FIELD_DESC);
+ oprot.writeString(struct.topology_id);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.topology_name != null) {
+ if (struct.is_set_topology_name()) {
+ oprot.writeFieldBegin(TOPOLOGY_NAME_FIELD_DESC);
+ oprot.writeString(struct.topology_name);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.is_set_num_executors()) {
+ oprot.writeFieldBegin(NUM_EXECUTORS_FIELD_DESC);
+ oprot.writeI32(struct.num_executors);
+ oprot.writeFieldEnd();
+ }
+ if (struct.component_to_num_tasks != null) {
+ if (struct.is_set_component_to_num_tasks()) {
+ oprot.writeFieldBegin(COMPONENT_TO_NUM_TASKS_FIELD_DESC);
+ {
+ oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, struct.component_to_num_tasks.size()));
+ for (Map.Entry<String, Long> _iter432 : struct.component_to_num_tasks.entrySet())
+ {
+ oprot.writeString(_iter432.getKey());
+ oprot.writeI64(_iter432.getValue());
+ }
+ oprot.writeMapEnd();
+ }
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.is_set_time_secs()) {
+ oprot.writeFieldBegin(TIME_SECS_FIELD_DESC);
+ oprot.writeI32(struct.time_secs);
+ oprot.writeFieldEnd();
+ }
+ if (struct.is_set_uptime_secs()) {
+ oprot.writeFieldBegin(UPTIME_SECS_FIELD_DESC);
+ oprot.writeI32(struct.uptime_secs);
+ oprot.writeFieldEnd();
+ }
+ if (struct.is_set_requested_memonheap()) {
+ oprot.writeFieldBegin(REQUESTED_MEMONHEAP_FIELD_DESC);
+ oprot.writeDouble(struct.requested_memonheap);
+ oprot.writeFieldEnd();
+ }
+ if (struct.is_set_requested_memoffheap()) {
+ oprot.writeFieldBegin(REQUESTED_MEMOFFHEAP_FIELD_DESC);
+ oprot.writeDouble(struct.requested_memoffheap);
+ oprot.writeFieldEnd();
+ }
+ if (struct.is_set_requested_cpu()) {
+ oprot.writeFieldBegin(REQUESTED_CPU_FIELD_DESC);
+ oprot.writeDouble(struct.requested_cpu);
+ oprot.writeFieldEnd();
+ }
+ if (struct.is_set_assigned_memonheap()) {
+ oprot.writeFieldBegin(ASSIGNED_MEMONHEAP_FIELD_DESC);
+ oprot.writeDouble(struct.assigned_memonheap);
+ oprot.writeFieldEnd();
+ }
+ if (struct.is_set_assigned_memoffheap()) {
+ oprot.writeFieldBegin(ASSIGNED_MEMOFFHEAP_FIELD_DESC);
+ oprot.writeDouble(struct.assigned_memoffheap);
+ oprot.writeFieldEnd();
+ }
+ if (struct.is_set_assigned_cpu()) {
+ oprot.writeFieldBegin(ASSIGNED_CPU_FIELD_DESC);
+ oprot.writeDouble(struct.assigned_cpu);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class WorkerSummaryTupleSchemeFactory implements SchemeFactory {
+ public WorkerSummaryTupleScheme getScheme() {
+ return new WorkerSummaryTupleScheme();
+ }
+ }
+
+ private static class WorkerSummaryTupleScheme extends TupleScheme<WorkerSummary> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ BitSet optionals = new BitSet();
+ if (struct.is_set_supervisor_id()) {
+ optionals.set(0);
+ }
+ if (struct.is_set_host()) {
+ optionals.set(1);
+ }
+ if (struct.is_set_port()) {
+ optionals.set(2);
+ }
+ if (struct.is_set_topology_id()) {
+ optionals.set(3);
+ }
+ if (struct.is_set_topology_name()) {
+ optionals.set(4);
+ }
+ if (struct.is_set_num_executors()) {
+ optionals.set(5);
+ }
+ if (struct.is_set_component_to_num_tasks()) {
+ optionals.set(6);
+ }
+ if (struct.is_set_time_secs()) {
+ optionals.set(7);
+ }
+ if (struct.is_set_uptime_secs()) {
+ optionals.set(8);
+ }
+ if (struct.is_set_requested_memonheap()) {
+ optionals.set(9);
+ }
+ if (struct.is_set_requested_memoffheap()) {
+ optionals.set(10);
+ }
+ if (struct.is_set_requested_cpu()) {
+ optionals.set(11);
+ }
+ if (struct.is_set_assigned_memonheap()) {
+ optionals.set(12);
+ }
+ if (struct.is_set_assigned_memoffheap()) {
+ optionals.set(13);
+ }
+ if (struct.is_set_assigned_cpu()) {
+ optionals.set(14);
+ }
+ oprot.writeBitSet(optionals, 15);
+ if (struct.is_set_supervisor_id()) {
+ oprot.writeString(struct.supervisor_id);
+ }
+ if (struct.is_set_host()) {
+ oprot.writeString(struct.host);
+ }
+ if (struct.is_set_port()) {
+ oprot.writeI32(struct.port);
+ }
+ if (struct.is_set_topology_id()) {
+ oprot.writeString(struct.topology_id);
+ }
+ if (struct.is_set_topology_name()) {
+ oprot.writeString(struct.topology_name);
+ }
+ if (struct.is_set_num_executors()) {
+ oprot.writeI32(struct.num_executors);
+ }
+ if (struct.is_set_component_to_num_tasks()) {
+ {
+ oprot.writeI32(struct.component_to_num_tasks.size());
+ for (Map.Entry<String, Long> _iter433 : struct.component_to_num_tasks.entrySet())
+ {
+ oprot.writeString(_iter433.getKey());
+ oprot.writeI64(_iter433.getValue());
+ }
+ }
+ }
+ if (struct.is_set_time_secs()) {
+ oprot.writeI32(struct.time_secs);
+ }
+ if (struct.is_set_uptime_secs()) {
+ oprot.writeI32(struct.uptime_secs);
+ }
+ if (struct.is_set_requested_memonheap()) {
+ oprot.writeDouble(struct.requested_memonheap);
+ }
+ if (struct.is_set_requested_memoffheap()) {
+ oprot.writeDouble(struct.requested_memoffheap);
+ }
+ if (struct.is_set_requested_cpu()) {
+ oprot.writeDouble(struct.requested_cpu);
+ }
+ if (struct.is_set_assigned_memonheap()) {
+ oprot.writeDouble(struct.assigned_memonheap);
+ }
+ if (struct.is_set_assigned_memoffheap()) {
+ oprot.writeDouble(struct.assigned_memoffheap);
+ }
+ if (struct.is_set_assigned_cpu()) {
+ oprot.writeDouble(struct.assigned_cpu);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ BitSet incoming = iprot.readBitSet(15);
+ if (incoming.get(0)) {
+ struct.supervisor_id = iprot.readString();
+ struct.set_supervisor_id_isSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.host = iprot.readString();
+ struct.set_host_isSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.port = iprot.readI32();
+ struct.set_port_isSet(true);
+ }
+ if (incoming.get(3)) {
+ struct.topology_id = iprot.readString();
+ struct.set_topology_id_isSet(true);
+ }
+ if (incoming.get(4)) {
+ struct.topology_name = iprot.readString();
+ struct.set_topology_name_isSet(true);
+ }
+ if (incoming.get(5)) {
+ struct.num_executors = iprot.readI32();
+ struct.set_num_executors_isSet(true);
+ }
+ if (incoming.get(6)) {
+ {
+ org.apache.thrift.protocol.TMap _map434 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32());
+ struct.component_to_num_tasks = new HashMap<String,Long>(2*_map434.size);
+ String _key435;
+ long _val436;
+ for (int _i437 = 0; _i437 < _map434.size; ++_i437)
+ {
+ _key435 = iprot.readString();
+ _val436 = iprot.readI64();
+ struct.component_to_num_tasks.put(_key435, _val436);
+ }
+ }
+ struct.set_component_to_num_tasks_isSet(true);
+ }
+ if (incoming.get(7)) {
+ struct.time_secs = iprot.readI32();
+ struct.set_time_secs_isSet(true);
+ }
+ if (incoming.get(8)) {
+ struct.uptime_secs = iprot.readI32();
+ struct.set_uptime_secs_isSet(true);
+ }
+ if (incoming.get(9)) {
+ struct.requested_memonheap = iprot.readDouble();
+ struct.set_requested_memonheap_isSet(true);
+ }
+ if (incoming.get(10)) {
+ struct.requested_memoffheap = iprot.readDouble();
+ struct.set_requested_memoffheap_isSet(true);
+ }
+ if (incoming.get(11)) {
+ struct.requested_cpu = iprot.readDouble();
+ struct.set_requested_cpu_isSet(true);
+ }
+ if (incoming.get(12)) {
+ struct.assigned_memonheap = iprot.readDouble();
+ struct.set_assigned_memonheap_isSet(true);
+ }
+ if (incoming.get(13)) {
+ struct.assigned_memoffheap = iprot.readDouble();
+ struct.set_assigned_memoffheap_isSet(true);
+ }
+ if (incoming.get(14)) {
+ struct.assigned_cpu = iprot.readDouble();
+ struct.set_assigned_cpu_isSet(true);
+ }
+ }
+ }
+
+}
+
http://git-wip-us.apache.org/repos/asf/storm/blob/4de339a8/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java
----------------------------------------------------------------------
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java b/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java
new file mode 100644
index 0000000..8f72b82
--- /dev/null
+++ b/storm-client/src/jvm/org/apache/storm/grouping/CustomStreamGrouping.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.grouping;
+
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.task.WorkerTopologyContext;
+import java.io.Serializable;
+import java.util.List;
+
public interface CustomStreamGrouping extends Serializable {

    /**
     * Tells the stream grouping at runtime the tasks in the target bolt.
     * This information should be used in chooseTasks to determine the target tasks.
     *
     * It also tells the grouping the metadata on the stream this grouping will be used on.
     *
     * @param context the context of the worker topology this grouping runs in
     * @param stream the stream this grouping will be used on
     * @param targetTasks the task ids of the tasks in the target bolt
     */
    void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks);

    /**
     * This function implements a custom stream grouping. It takes in as input
     * the number of tasks in the target bolt in prepare and returns the
     * tasks to send the tuples to.
     *
     * @param taskId the id of the task emitting the tuple (presumably the source task — confirm with callers)
     * @param values the values to group on
     * @return the task ids (drawn from the targetTasks given to prepare) to send the tuple to
     */
    List<Integer> chooseTasks(int taskId, List<Object> values);
}
http://git-wip-us.apache.org/repos/asf/storm/blob/4de339a8/storm-client/src/jvm/org/apache/storm/grouping/Load.java
----------------------------------------------------------------------
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/Load.java b/storm-client/src/jvm/org/apache/storm/grouping/Load.java
new file mode 100644
index 0000000..c0f5595
--- /dev/null
+++ b/storm-client/src/jvm/org/apache/storm/grouping/Load.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.grouping;
+
+import java.io.Serializable;
+
/**
 * Represents the load that a Bolt is currently under to help in
 * deciding where to route a tuple, to help balance the load.
 */
public class Load {
    // Immutable snapshot: all values are fixed at construction time.
    private final boolean hasMetrics;
    private final double boltLoad;       // 0 no load to 1 fully loaded
    private final double connectionLoad; // 0 no load to 1 fully loaded

    /**
     * Create a new load
     * @param hasMetrics have metrics been reported yet?
     * @param boltLoad the load as reported by the bolt 0.0 no load 1.0 fully loaded
     * @param connectionLoad the load as reported by the connection to the bolt 0.0 no load 1.0 fully loaded.
     */
    public Load(boolean hasMetrics, double boltLoad, double connectionLoad) {
        this.hasMetrics = hasMetrics;
        this.boltLoad = boltLoad;
        this.connectionLoad = connectionLoad;
    }

    /**
     * @return true if metrics have been reported so far.
     */
    public boolean hasMetrics() {
        return hasMetrics;
    }

    /**
     * @return the load as reported by the bolt.
     */
    public double getBoltLoad() {
        return boltLoad;
    }

    /**
     * @return the load as reported by the connection
     */
    public double getConnectionLoad() {
        return connectionLoad;
    }

    /**
     * @return the load that is a combination of sub loads.
     */
    public double getLoad() {
        if (!hasMetrics) {
            // With no data yet, report fully loaded so this target is not favored.
            return 1.0;
        }
        return Math.max(boltLoad, connectionLoad);
    }

    @Override
    public String toString() {
        return "[:load " + boltLoad + " " + connectionLoad + "]";
    }
}
http://git-wip-us.apache.org/repos/asf/storm/blob/4de339a8/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareCustomStreamGrouping.java
----------------------------------------------------------------------
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareCustomStreamGrouping.java b/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareCustomStreamGrouping.java
new file mode 100644
index 0000000..d825d2e
--- /dev/null
+++ b/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareCustomStreamGrouping.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.grouping;
+
+import java.util.List;
+
/**
 * A {@link CustomStreamGrouping} that can additionally consult the current
 * load of the target tasks when choosing where to route a tuple.
 */
public interface LoadAwareCustomStreamGrouping extends CustomStreamGrouping {

    /**
     * Like {@code CustomStreamGrouping.chooseTasks(int, List)} but with access
     * to the current load of the target tasks.
     *
     * @param taskId the id of the task emitting the tuple
     * @param values the values to group on
     * @param load the current load information for the target tasks
     * @return the task ids to send the tuple to
     */
    List<Integer> chooseTasks(int taskId, List<Object> values, LoadMapping load);
}
http://git-wip-us.apache.org/repos/asf/storm/blob/4de339a8/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareShuffleGrouping.java
----------------------------------------------------------------------
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareShuffleGrouping.java b/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareShuffleGrouping.java
new file mode 100644
index 0000000..9a07194
--- /dev/null
+++ b/storm-client/src/jvm/org/apache/storm/grouping/LoadAwareShuffleGrouping.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.grouping;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.task.WorkerTopologyContext;
+
/**
 * A shuffle grouping that biases the random choice of target task by the
 * reported load: less-loaded tasks receive proportionally more tuples.
 */
public class LoadAwareShuffleGrouping implements LoadAwareCustomStreamGrouping, Serializable {
    private Random random;
    private List<Integer>[] rets; // pre-built singleton result lists, parallel to targets, to avoid per-tuple allocation
    private int[] targets;        // the target task ids
    private int[] loads;          // selection weight per target: 101 - (load * 100), so in [1, 101]
    private int total;            // sum of loads; upper bound for the weighted random draw
    private long lastUpdate = 0;  // epoch millis of the last weight refresh

    @Override
    public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
        random = new Random();
        // Generic array creation is illegal, so build a wildcard array and cast (unchecked).
        rets = (List<Integer>[])new List<?>[targetTasks.size()];
        targets = new int[targetTasks.size()];
        for (int i = 0; i < targets.length; i++) {
            rets[i] = Arrays.asList(targetTasks.get(i));
            targets[i] = targetTasks.get(i);
        }
        loads = new int[targets.length];
    }

    @Override
    public List<Integer> chooseTasks(int taskId, List<Object> values) {
        // Callers must use the load-aware overload below.
        throw new RuntimeException("NOT IMPLEMENTED");
    }

    @Override
    public List<Integer> chooseTasks(int taskId, List<Object> values, LoadMapping load) {
        // Refresh the cached weights at most once per second.
        // NOTE(review): lastUpdate/loads/total are mutated without synchronization;
        // presumably each executor owns its own instance — confirm before sharing
        // this object across threads.
        if ((lastUpdate + 1000) < System.currentTimeMillis()) {
            int local_total = 0;
            for (int i = 0; i < targets.length; i++) {
                // Weight in [1, 101]: even a fully loaded task (load 1.0) keeps a small chance.
                int val = (int)(101 - (load.get(targets[i]) * 100));
                loads[i] = val;
                local_total += val;
            }
            total = local_total;
            lastUpdate = System.currentTimeMillis();
        }
        // Weighted ("roulette wheel") selection: walk the weights until the draw
        // falls inside a target's bucket.
        // NOTE(review): with zero target tasks, total stays 0 and nextInt(0) throws —
        // presumably prepare() is never called with an empty task list; confirm.
        int selected = random.nextInt(total);
        int sum = 0;
        for (int i = 0; i < targets.length; i++) {
            sum += loads[i];
            if (selected < sum) {
                return rets[i];
            }
        }
        // Defensive fallback in case rounding leaves selected beyond the last bucket.
        return rets[rets.length-1];
    }
}
http://git-wip-us.apache.org/repos/asf/storm/blob/4de339a8/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java
----------------------------------------------------------------------
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java b/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java
new file mode 100644
index 0000000..a6373c9
--- /dev/null
+++ b/storm-client/src/jvm/org/apache/storm/grouping/LoadMapping.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.grouping;
+
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Holds a list of the current loads
+ */
+public class LoadMapping {
+ private static final Load NOT_CONNECTED = new Load(false, 1.0, 1.0);
+ private final AtomicReference<Map<Integer,Load>> _local = new AtomicReference<Map<Integer,Load>>(new HashMap<Integer,Load>());
+ private final AtomicReference<Map<Integer,Load>> _remote = new AtomicReference<Map<Integer,Load>>(new HashMap<Integer,Load>());
+
+ public void setLocal(Map<Integer, Double> local) {
+ Map<Integer, Load> newLocal = new HashMap<Integer, Load>();
+ if (local != null) {
+ for (Map.Entry<Integer, Double> entry: local.entrySet()) {
+ newLocal.put(entry.getKey(), new Load(true, entry.getValue(), 0.0));
+ }
+ }
+ _local.set(newLocal);
+ }
+
+ public void setRemote(Map<Integer, Load> remote) {
+ if (remote != null) {
+ _remote.set(new HashMap<Integer, Load>(remote));
+ } else {
+ _remote.set(new HashMap<Integer, Load>());
+ }
+ }
+
+ public Load getLoad(int task) {
+ Load ret = _local.get().get(task);
+ if (ret == null) {
+ ret = _remote.get().get(task);
+ }
+ if (ret == null) {
+ ret = NOT_CONNECTED;
+ }
+ return ret;
+ }
+
+ public double get(int task) {
+ return getLoad(task).getLoad();
+ }
+}
http://git-wip-us.apache.org/repos/asf/storm/blob/4de339a8/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java
----------------------------------------------------------------------
diff --git a/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java b/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java
new file mode 100644
index 0000000..e1af16d
--- /dev/null
+++ b/storm-client/src/jvm/org/apache/storm/grouping/PartialKeyGrouping.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.grouping;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.task.WorkerTopologyContext;
+import org.apache.storm.tuple.Fields;
+
+import com.google.common.hash.HashFunction;
+import com.google.common.hash.Hashing;
+
/**
 * A key-based grouping that spreads each key over two candidate tasks
 * ("power of two choices"): the key is hashed with two independently seeded
 * hash functions and the tuple is routed to whichever of the two candidates
 * has received fewer tuples so far, balancing load under key skew.
 */
public class PartialKeyGrouping implements CustomStreamGrouping, Serializable {
    private static final long serialVersionUID = -447379837314000353L;
    private List<Integer> targetTasks;
    private long[] targetTaskStats; // per-target count of tuples sent; used to pick the less-loaded candidate
    private HashFunction h1 = Hashing.murmur3_128(13); // two murmur3 instances with distinct seeds
    private HashFunction h2 = Hashing.murmur3_128(17);
    private Fields fields = null;    // fields forming the key; null means "use the first value"
    private Fields outFields = null; // output fields of the source stream, resolved in prepare()

    public PartialKeyGrouping() {
        //Empty
    }

    /**
     * @param fields the fields whose values form the grouping key
     */
    public PartialKeyGrouping(Fields fields) {
        this.fields = fields;
    }

    @Override
    public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
        this.targetTasks = targetTasks;
        targetTaskStats = new long[this.targetTasks.size()];
        if (this.fields != null) {
            this.outFields = context.getComponentOutputFields(stream);
        }
    }

    @Override
    public List<Integer> chooseTasks(int taskId, List<Object> values) {
        List<Integer> boltIds = new ArrayList<>(1);
        if (values.size() > 0) {
            byte[] raw;
            if (fields != null) {
                // Build a stable byte representation from the hash codes of the
                // selected key fields (4 bytes per field).
                List<Object> selectedFields = outFields.select(fields, values);
                ByteBuffer out = ByteBuffer.allocate(selectedFields.size() * 4);
                for (Object o: selectedFields) {
                    // Arrays need element-wise hashing: Object.hashCode() on an
                    // array is identity-based and would not be stable per key.
                    if (o instanceof List) {
                        out.putInt(Arrays.deepHashCode(((List)o).toArray()));
                    } else if (o instanceof Object[]) {
                        out.putInt(Arrays.deepHashCode((Object[])o));
                    } else if (o instanceof byte[]) {
                        out.putInt(Arrays.hashCode((byte[]) o));
                    } else if (o instanceof short[]) {
                        out.putInt(Arrays.hashCode((short[]) o));
                    } else if (o instanceof int[]) {
                        out.putInt(Arrays.hashCode((int[]) o));
                    } else if (o instanceof long[]) {
                        out.putInt(Arrays.hashCode((long[]) o));
                    } else if (o instanceof char[]) {
                        out.putInt(Arrays.hashCode((char[]) o));
                    } else if (o instanceof float[]) {
                        out.putInt(Arrays.hashCode((float[]) o));
                    } else if (o instanceof double[]) {
                        out.putInt(Arrays.hashCode((double[]) o));
                    } else if (o instanceof boolean[]) {
                        out.putInt(Arrays.hashCode((boolean[]) o));
                    } else if (o != null) {
                        out.putInt(o.hashCode());
                    } else {
                        // Null field contributes a fixed hash of 0.
                        out.putInt(0);
                    }
                }
                raw = out.array();
            } else {
                // NOTE(review): getBytes() uses the platform default charset;
                // workers with differing defaults could route the same non-ASCII
                // key differently — consider StandardCharsets.UTF_8.
                raw = values.get(0).toString().getBytes(); // assume key is the first field
            }
            // Two hash candidates; Math.abs guards the modulo against negative longs.
            int firstChoice = (int) (Math.abs(h1.hashBytes(raw).asLong()) % this.targetTasks.size());
            int secondChoice = (int) (Math.abs(h2.hashBytes(raw).asLong()) % this.targetTasks.size());
            // Send to the candidate that has seen fewer tuples so far (ties go to firstChoice).
            int selected = targetTaskStats[firstChoice] > targetTaskStats[secondChoice] ? secondChoice : firstChoice;
            boltIds.add(targetTasks.get(selected));
            targetTaskStats[selected]++;
        }
        return boltIds;
    }
}